Columns: code (string, lengths 75 to 104k) · docstring (string, lengths 1 to 46.9k)
def _make_pkh_address(pubkey_hash, witness=False, cashaddr=True):
    ''' bytes, bool -> str '''
    addr_bytes = bytearray()
    if riemann.network.CASHADDR_P2PKH is not None and cashaddr:
        addr_bytes.extend(riemann.network.CASHADDR_P2PKH)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
    if witness:
        addr_bytes.extend(riemann.network.P2WPKH_PREFIX)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
    else:
        addr_bytes.extend(riemann.network.P2PKH_PREFIX)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.LEGACY_ENCODER.encode(addr_bytes)
bytes, bool -> str
def get_report(self, value):
    """Return provided field Python value formatted for use in report filter"""
    if self.multiselect:
        value = value or []
        children = []
        for child in value:
            children.append(self.cast_to_report(child))
        return children
    return self.cast_to_report(value)
Return provided field Python value formatted for use in report filter
def request(self, persist_id=None):
    """Cancel an ongoing confirmed commit.

    Depends on the `:candidate` and `:confirmed-commit` capabilities.

    *persist-id* value must be equal to the value given in the <persist>
    parameter to the previous <commit> operation.
    """
    node = new_ele("cancel-commit")
    if persist_id is not None:
        sub_ele(node, "persist-id").text = persist_id
    return self._request(node)
Cancel an ongoing confirmed commit. Depends on the `:candidate` and `:confirmed-commit` capabilities. *persist-id* value must be equal to the value given in the <persist> parameter to the previous <commit> operation.
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
    """Plots one or more spectra in the same plot.

    Args:
        ss: list of Spectrum objects
        title=None: window title
        setup: PlotSpectrumSetup object
    """
    plt.figure()
    draw_spectra_overlapped(ss, title, setup)
    plt.show()
Plots one or more spectra in the same plot. Args: ss: list of Spectrum objects title=None: window title setup: PlotSpectrumSetup object
def attr_case_name(self, name):
    """Returns preserved case name for case insensitive value of name.

    Checks first within standard attributes. If not found there, checks
    attributes for higher order data structures. If not found, returns
    supplied name as it is available for use. Intended to be used to help
    ensure that the same case is applied to all repetitions of a given
    variable name.

    Parameters
    ----------
    name : str
        name of variable to get stored case form

    Returns
    -------
    str
        name in proper case
    """
    lower_name = name.lower()
    for i in self.attrs():
        if lower_name == i.lower():
            return i
    # check if attribute present in higher order structures
    for key in self.keys_nD():
        for i in self[key].children.attrs():
            if lower_name == i.lower():
                return i
    # nothing was found if still here
    # pass name back, free to be whatever
    return name
Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case
def mmapFile(self, addr, size, perms, filename, offset=0): """ Creates a new file mapping in the memory address space. :param addr: the starting address (took as hint). If C{addr} is C{0} the first big enough chunk of memory will be selected as starting address. :param size: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :param perms: the access permissions to this memory. :param filename: the pathname to the file to map. :param offset: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :return: the starting address where the file was mapped. :rtype: int :raises error: - 'Address shall be concrete' if C{addr} is not an integer number. - 'Address too big' if C{addr} goes beyond the limit of the memory. - 'Map already used' if the piece of memory starting in C{addr} and with length C{size} isn't free. """ # If addr is NULL, the system determines where to allocate the region. assert addr is None or isinstance(addr, int), 'Address shall be concrete' assert size > 0 self.cpu._publish('will_map_memory', addr, size, perms, filename, offset) # address is rounded down to the nearest multiple of the allocation granularity if addr is not None: assert addr < self.memory_size, 'Address too big' addr = self._floor(addr) # size value is rounded up to the next page boundary size = self._ceil(size) # If zero search for a spot addr = self._search(size, addr) # It should not be allocated for i in range(self._page(addr), self._page(addr + size)): assert i not in self._page2map, 'Map already used' # Create the map m = FileMap(addr, size, perms, filename, offset) # Okay, ready to alloc self._add(m) logger.debug('New file-memory map @%x size:%x', addr, size) self.cpu._publish('did_map_memory', addr, size, perms, filename, offset, addr) return addr
Creates a new file mapping in the memory address space. :param addr: the starting address (taken as a hint). If C{addr} is C{0} the first big enough chunk of memory will be selected as starting address. :param size: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :param perms: the access permissions to this memory. :param filename: the pathname to the file to map. :param offset: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :return: the starting address where the file was mapped. :rtype: int :raises error: - 'Address shall be concrete' if C{addr} is not an integer number. - 'Address too big' if C{addr} goes beyond the limit of the memory. - 'Map already used' if the piece of memory starting in C{addr} and with length C{size} isn't free.
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None): """ TensorFlow implementation of the foward derivative / Jacobian :param x: the input placeholder :param grads: the list of TF gradients returned by jacobian_graph() :param target: the target misclassification class :param X: numpy array with sample input :param nb_features: the number of features in the input :return: matrix of forward derivatives flattened into vectors """ warnings.warn( "This function is dead code and will be removed on or after 2019-07-18") # Prepare feeding dictionary for all gradient computations feed_dict = {x: X} if feed is not None: feed_dict.update(feed) # Initialize a numpy array to hold the Jacobian component values jacobian_val = np.zeros((nb_classes, nb_features), dtype=np_dtype) # Compute the gradients for all classes for class_ind, grad in enumerate(grads): run_grad = sess.run(grad, feed_dict) jacobian_val[class_ind] = np.reshape(run_grad, (1, nb_features)) # Sum over all classes different from the target class to prepare for # saliency map computation in the next step of the attack other_classes = utils.other_classes(nb_classes, target) grad_others = np.sum(jacobian_val[other_classes, :], axis=0) return jacobian_val[target], grad_others
TensorFlow implementation of the forward derivative / Jacobian :param x: the input placeholder :param grads: the list of TF gradients returned by jacobian_graph() :param target: the target misclassification class :param X: numpy array with sample input :param nb_features: the number of features in the input :return: matrix of forward derivatives flattened into vectors
def set_key(self, key):
    """Init."""
    key_len = len(key)
    if key_len not in [16, 24, 32]:
        # XXX: add padding?
        raise KeyError("key must be 16, 24 or 32 bytes")
    if key_len % 4:
        # XXX: add padding?
        raise KeyError("key not a multiple of 4")
    if key_len > 32:
        # XXX: prune?
        raise KeyError("key_len > 32")
    self.context = TWI()
    key_word32 = [0] * 32
    i = 0
    while key:
        key_word32[i] = struct.unpack("<L", key[0:4])[0]
        key = key[4:]
        i += 1
    set_key(self.context, key_word32, key_len)
Init.
def _edges_classify_intersection9(): """The edges for the curved polygon intersection used below. Helper for :func:`classify_intersection9`. """ edges1 = ( bezier.Curve.from_nodes( np.asfortranarray([[32.0, 30.0], [20.0, 25.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[30.0, 32.0], [15.0, 20.0]]) ), ) edges2 = ( bezier.Curve.from_nodes( np.asfortranarray([[8.0, 10.0], [20.0, 15.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]]) ), bezier.Curve.from_nodes( np.asfortranarray([[10.0, 8.0], [25.0, 20.0]]) ), ) return edges1, edges2
The edges for the curved polygon intersection used below. Helper for :func:`classify_intersection9`.
def interpolate(self, factor, minLayer, maxLayer, round=True, suppressError=True): """ Interpolate all possible data in the layer. :: >>> layer.interpolate(0.5, otherLayer1, otherLayer2) >>> layer.interpolate((0.5, 2.0), otherLayer1, otherLayer2, round=False) The interpolation occurs on a 0 to 1.0 range where **minLayer** is located at 0 and **maxLayer** is located at 1.0. **factor** is the interpolation value. It may be less than 0 and greater than 1.0. It may be a :ref:`type-int-float` or a tuple of two :ref:`type-int-float`. If it is a tuple, the first number indicates the x factor and the second number indicates the y factor. **round** indicates if the result should be rounded to integers. **suppressError** indicates if incompatible data should be ignored or if an error should be raised when such incompatibilities are found. """ factor = normalizers.normalizeInterpolationFactor(factor) if not isinstance(minLayer, BaseLayer): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % (self.__class__.__name__, minLayer.__class__.__name__)) if not isinstance(maxLayer, BaseLayer): raise TypeError(("Interpolation to an instance of %r can not be " "performed from an instance of %r.") % (self.__class__.__name__, maxLayer.__class__.__name__)) round = normalizers.normalizeBoolean(round) suppressError = normalizers.normalizeBoolean(suppressError) self._interpolate(factor, minLayer, maxLayer, round=round, suppressError=suppressError)
Interpolate all possible data in the layer. :: >>> layer.interpolate(0.5, otherLayer1, otherLayer2) >>> layer.interpolate((0.5, 2.0), otherLayer1, otherLayer2, round=False) The interpolation occurs on a 0 to 1.0 range where **minLayer** is located at 0 and **maxLayer** is located at 1.0. **factor** is the interpolation value. It may be less than 0 and greater than 1.0. It may be a :ref:`type-int-float` or a tuple of two :ref:`type-int-float`. If it is a tuple, the first number indicates the x factor and the second number indicates the y factor. **round** indicates if the result should be rounded to integers. **suppressError** indicates if incompatible data should be ignored or if an error should be raised when such incompatibilities are found.
def true_num_reactions(model, custom_spont_id=None):
    """Return the number of reactions associated with a gene.

    Args:
        model (Model):
        custom_spont_id (str): Optional custom spontaneous ID if it does not
            match the regular expression ``[Ss](_|)0001``

    Returns:
        int: Number of reactions associated with a gene
    """
    true_num = 0
    for rxn in model.reactions:
        if len(rxn.genes) == 0:
            continue
        if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0], custom_id=custom_spont_id):
            continue
        else:
            true_num += 1
    return true_num
Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene
def mongodump(mongo_user, mongo_password, mongo_dump_directory_path, database=None, silent=False):
    """
    Runs mongodump using the provided credentials on the running mongod process.
    WARNING: This function will delete the contents of the provided directory before it runs.
    """
    if path.exists(mongo_dump_directory_path):
        # If a backup dump already exists, delete it
        rmtree(mongo_dump_directory_path)
    if silent:
        dump_command = ("mongodump --quiet -u %s -p %s -o %s"
                        % (mongo_user, mongo_password, mongo_dump_directory_path))
    else:
        dump_command = ("mongodump -u %s -p %s -o %s"
                        % (mongo_user, mongo_password, mongo_dump_directory_path))
    if database:
        dump_command += (" --db %s" % database)
    call(dump_command, silent=silent)
Runs mongodump using the provided credentials on the running mongod process. WARNING: This function will delete the contents of the provided directory before it runs.
def sanitize_capabilities(caps):
    """
    Sanitize the capabilities we pass to Selenic so that they can be
    consumed by Browserstack.

    :param caps: The capabilities passed to Selenic. This dictionary is modified.
    :returns: The sanitized capabilities.
    """
    platform = caps["platform"]
    upper_platform = platform.upper()
    if upper_platform.startswith("WINDOWS 8"):
        caps["platform"] = "WIN8"
    elif upper_platform.startswith("OS X "):
        caps["platform"] = "MAC"
    elif upper_platform == "WINDOWS 10":
        del caps["platform"]
        caps["os"] = "Windows"
        caps["os_version"] = "10"
    if caps["browserName"].upper() == "MICROSOFTEDGE":
        # Sauce Labs takes complete version numbers like 15.1234. However,
        # Browser Stack takes only .0 numbers like 15.0.
        caps["version"] = caps["version"].split(".", 1)[0] + ".0"
    caps["browser_version"] = caps["version"]
    del caps["version"]
    return caps
Sanitize the capabilities we pass to Selenic so that they can be consumed by Browserstack. :param caps: The capabilities passed to Selenic. This dictionary is modified. :returns: The sanitized capabilities.
def choose_parent_view(self, request):
    """
    Instantiates a class-based view to provide a view that allows a parent
    page to be chosen for a new object, where the assigned model extends
    Wagtail's Page model, and there is more than one potential parent for
    new instances. The view class used can be overridden by changing the
    'choose_parent_view_class' attribute.
    """
    kwargs = {'model_admin': self}
    view_class = self.choose_parent_view_class
    return view_class.as_view(**kwargs)(request)
Instantiates a class-based view to provide a view that allows a parent page to be chosen for a new object, where the assigned model extends Wagtail's Page model, and there is more than one potential parent for new instances. The view class used can be overridden by changing the 'choose_parent_view_class' attribute.
def get_or_create(cls, filter_key=None, with_status=False, **kwargs): """ Convenience method to retrieve an Element or create if it does not exist. If an element does not have a `create` classmethod, then it is considered read-only and the request will be redirected to :meth:`~get`. Any keyword arguments passed except the optional filter_key will be used in a create() call. If filter_key is provided, this should define an attribute and value to use for an exact match on the element. Valid attributes are ones required on the elements ``create`` method or can be viewed by the elements class docs. If no filter_key is provided, the name field will be used to find the element. :: >>> Network.get_or_create( filter_key={'ipv4_network': '123.123.123.0/24'}, name='mynetwork', ipv4_network='123.123.123.0/24') Network(name=mynetwork) The kwargs should be used to satisfy the elements ``create`` classmethod parameters to create in the event it cannot be found. :param dict filter_key: filter key represents the data attribute and value to use to find the element. If none is provided, the name field will be used. :param kwargs: keyword arguments mapping to the elements ``create`` method. :param bool with_status: if set to True, a tuple is returned with (Element, created), where the second tuple item indicates if the element has been created or not. :raises CreateElementFailed: could not create element with reason :raises ElementNotFound: if read-only element does not exist :return: element instance by type :rtype: Element """ was_created = False if 'name' not in kwargs: raise ElementNotFound('Name field is a required parameter ' 'for all create or update_or_create type operations on an element') if filter_key: elements = cls.objects.filter(**filter_key) element = elements.first() if elements.exists() else None else: try: element = cls.get(kwargs.get('name')) except ElementNotFound: if not hasattr(cls, 'create'): raise CreateElementFailed('%s: %r not found and this element ' 'type does not have a create method.' % (cls.__name__, kwargs['name'])) element = None if not element: params = {k: v() if callable(v) else v for k, v in kwargs.items()} try: element = cls.create(**params) was_created = True except TypeError: raise CreateElementFailed('%s: %r not found and missing ' 'constructor arguments to properly create.' % (cls.__name__, kwargs['name'])) if with_status: return element, was_created return element
Convenience method to retrieve an Element or create if it does not exist. If an element does not have a `create` classmethod, then it is considered read-only and the request will be redirected to :meth:`~get`. Any keyword arguments passed except the optional filter_key will be used in a create() call. If filter_key is provided, this should define an attribute and value to use for an exact match on the element. Valid attributes are ones required on the elements ``create`` method or can be viewed by the elements class docs. If no filter_key is provided, the name field will be used to find the element. :: >>> Network.get_or_create( filter_key={'ipv4_network': '123.123.123.0/24'}, name='mynetwork', ipv4_network='123.123.123.0/24') Network(name=mynetwork) The kwargs should be used to satisfy the elements ``create`` classmethod parameters to create in the event it cannot be found. :param dict filter_key: filter key represents the data attribute and value to use to find the element. If none is provided, the name field will be used. :param kwargs: keyword arguments mapping to the elements ``create`` method. :param bool with_status: if set to True, a tuple is returned with (Element, created), where the second tuple item indicates if the element has been created or not. :raises CreateElementFailed: could not create element with reason :raises ElementNotFound: if read-only element does not exist :return: element instance by type :rtype: Element
def process_service_check_result(self, service, return_code, plugin_output): """Process service check result Format of the line that triggers function call:: PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param return_code: exit code of plugin :type return_code: int :param plugin_output: plugin output :type plugin_output: str :return: None """ now = time.time() cls = service.__class__ # If globally disabled OR service disabled, do not launch.. if not cls.accept_passive_checks or not service.passive_checks_enabled: return try: plugin_output = plugin_output.decode('utf8', 'ignore') logger.debug('%s > Passive service check plugin output: %s', service.get_full_name(), plugin_output) except AttributeError: # Python 3 will raise an exception pass except UnicodeError: pass # Maybe the check is just too old, if so, bail out! if self.current_timestamp < service.last_chk: logger.debug('%s > Passive service check is too old (%d seconds). ' 'Ignoring, check output: %s', service.get_full_name(), self.current_timestamp < service.last_chk, plugin_output) return # Create a check object from the external command chk = service.launch_check(now, self.hosts, self.services, self.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True) # Should not be possible to not find the check, but if so, don't crash if not chk: logger.error('%s > Passive service check failed. None check launched !?', service.get_full_name()) return # Now we 'transform the check into a result' # So exit_status, output and status is eaten by the service chk.exit_status = return_code chk.get_outputs(plugin_output, service.max_plugins_output_length) logger.debug('%s > Passive service check output: %s', service.get_full_name(), chk.output) chk.status = ACT_STATUS_WAIT_CONSUME chk.check_time = self.current_timestamp # we are using the external command timestamps # Set the corresponding service's check type to passive chk.set_type_passive() # self.daemon.nb_check_received += 1 self.send_an_element(chk) # Ok now this result will be read by the scheduler the next loop # raise a passive check log only if needed if self.my_conf.log_passive_checks: log_level = 'info' if return_code == 1: # WARNING log_level = 'warning' if return_code == 2: # CRITICAL log_level = 'error' self.send_an_element(make_monitoring_log( log_level, 'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % ( self.hosts[service.host].get_name(), service.get_name(), return_code, chk.output, chk.long_output, chk.perf_data)))
Process service check result Format of the line that triggers function call:: PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param return_code: exit code of plugin :type return_code: int :param plugin_output: plugin output :type plugin_output: str :return: None
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    >>> list(grouper('ABCDEFG', 3, 'x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
    """
    if isinstance(iterable, int):
        warnings.warn(
            "grouper expects iterable as first parameter",
            DeprecationWarning,
        )
        n, iterable = iterable, n
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
Collect data into fixed-length chunks or blocks. >>> list(grouper('ABCDEFG', 3, 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
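A minimal sketch of the deprecated argument-order fallback visible in the code above; it assumes the grouper() defined in this entry is in scope, and the sample values are illustrative only.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Old call style with the chunk size first still works, but warns and
    # swaps the arguments internally.
    chunks = list(grouper(3, 'ABCDEFG', 'x'))

print(chunks)              # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
print(caught[0].category)  # <class 'DeprecationWarning'>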
def readline(self, raise_exception=False):
    """Read a line and return it.

    If "raise_exception" is set, raise _ConnectionDeadError if the read
    fails, otherwise return an empty string.
    """
    buf = self.buffer
    if self.socket:
        recv = self.socket.recv
    else:
        recv = lambda bufsize: ''
    while True:
        index = buf.find('\r\n')
        if index >= 0:
            break
        data = recv(4096)
        if not data:
            # connection close, let's kill it and raise
            self.mark_dead('connection closed in readline()')
            if raise_exception:
                raise _ConnectionDeadError()
            else:
                return ''
        buf += data
    self.buffer = buf[index+2:]
    return buf[:index]
Read a line and return it. If "raise_exception" is set, raise _ConnectionDeadError if the read fails, otherwise return an empty string.
def get_path_variables(**kwargs):
    """
    Get the base variables for any view to route to.

    Currently gets:
    - `enterprise_uuid` - the UUID of the enterprise customer.
    - `course_run_id` - the ID of the course, if applicable.
    - `program_uuid` - the UUID of the program, if applicable.
    """
    enterprise_customer_uuid = kwargs.get('enterprise_uuid', '')
    course_run_id = kwargs.get('course_id', '')
    course_key = kwargs.get('course_key', '')
    program_uuid = kwargs.get('program_uuid', '')
    return enterprise_customer_uuid, course_run_id, course_key, program_uuid
Get the base variables for any view to route to. Currently gets: - `enterprise_uuid` - the UUID of the enterprise customer. - `course_run_id` - the ID of the course, if applicable. - `program_uuid` - the UUID of the program, if applicable.
def use_comparative_vault_view(self):
    """The returns from the lookup methods may omit or translate elements
    based on this session, such as authorization, and not result in an
    error. This view is used when greater interoperability is desired at
    the expense of precision.

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.use_comparative_bin_view
    self._catalog_view = COMPARATIVE
    if self._catalog_session is not None:
        self._catalog_session.use_comparative_catalog_view()
The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error. This view is used when greater interoperability is desired at the expense of precision. *compliance: mandatory -- This method must be implemented.*
def report_device_attributes(self, mode=0, **kwargs):
    """Report terminal identity.

    .. versionadded:: 0.5.0

    .. versionchanged:: 0.7.0
       If ``private`` keyword argument is set, the method does nothing.
       This behaviour is consistent with VT220 manual.
    """
    # We only implement "primary" DA which is the only DA request
    # VT102 understood, see ``VT102ID`` in ``linux/drivers/tty/vt.c``.
    if mode == 0 and not kwargs.get("private"):
        self.write_process_input(ctrl.CSI + "?6c")
Report terminal identity. .. versionadded:: 0.5.0 .. versionchanged:: 0.7.0 If ``private`` keyword argument is set, the method does nothing. This behaviour is consistent with VT220 manual.
def list_listeners(self, retrieve_all=True, **_params):
    """Fetches a list of all lbaas_listeners for a project."""
    return self.list('listeners', self.lbaas_listeners_path,
                     retrieve_all, **_params)
Fetches a list of all lbaas_listeners for a project.
def add_service(self, name, long_name, preregistered=False, notify=True):
    """Add a service to the list of tracked services.

    Args:
        name (string): A unique short service name for the service
        long_name (string): A longer, user friendly name for the service
        preregistered (bool): Whether this service is an expected preregistered service.
        notify (bool): Send notifications about this service to all clients

    Returns:
        awaitable: If notify is True, an awaitable for the notifications.
            Otherwise None.
    """
    if name in self.services:
        raise ArgumentError("Could not add service because the long_name is taken", long_name=long_name)
    serv_state = states.ServiceState(name, long_name, preregistered)
    service = {
        'state': serv_state,
        'heartbeat_threshold': 600
    }
    self.services[name] = service
    if notify:
        return self._notify_update(name, 'new_service', self.service_info(name))
    return None
Add a service to the list of tracked services. Args: name (string): A unique short service name for the service long_name (string): A longer, user friendly name for the service preregistered (bool): Whether this service is an expected preregistered service. notify (bool): Send notifications about this service to all clients Returns: awaitable: If notify is True, an awaitable for the notifications. Otherwise None.
def libvlc_media_player_set_media(p_mi, p_md):
    '''Set the media that will be used by the media_player. If any, previous md will be released.
    @param p_mi: the Media Player.
    @param p_md: the Media. Afterwards the p_md can be safely destroyed.
    '''
    f = _Cfunctions.get('libvlc_media_player_set_media', None) or \
        _Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
                   None, MediaPlayer, Media)
    return f(p_mi, p_md)
Set the media that will be used by the media_player. If any, previous md will be released. @param p_mi: the Media Player. @param p_md: the Media. Afterwards the p_md can be safely destroyed.
def enable_thread_safety(self): """Enable thread-safety features. Must be called before start(). """ if self.threadsafe: return # Already done! if self._running.isSet(): raise RuntimeError('Cannot enable thread safety after start') def _getattr(obj, name): # use 'is True' so mock objects don't return true for everything return getattr(obj, name, False) is True for name in dir(self): try: meth = getattr(self, name) except AttributeError: # Subclasses may have computed attributes that don't work # before they are started, so let's ignore those pass if not callable(meth): continue make_threadsafe = _getattr(meth, 'make_threadsafe') make_threadsafe_blocking = _getattr(meth, 'make_threadsafe_blocking') if make_threadsafe: assert not make_threadsafe_blocking meth = self._make_threadsafe(meth) setattr(self, name, meth) elif make_threadsafe_blocking: meth = self._make_threadsafe_blocking(meth) setattr(self, name, meth) self._threadsafe = True
Enable thread-safety features. Must be called before start().
def init_tree(self, tree_alias, context): """Initializes sitetree in memory. Returns tuple with resolved tree alias and items on success. On fail returns (None, None). :param str|unicode tree_alias: :param Context context: :rtype: tuple """ request = context.get('request', None) if request is None: raise SiteTreeError( 'Sitetree requires "django.core.context_processors.request" template context processor to be active. ' 'If it is, check that your view pushes request data into the template.') if id(request) != id(self.current_request): self.init(context) # Resolve tree_alias from the context. tree_alias = self.resolve_var(tree_alias) tree_alias, sitetree_items = self.get_sitetree(tree_alias) if not sitetree_items: return None, None return tree_alias, sitetree_items
Initializes sitetree in memory. Returns tuple with resolved tree alias and items on success. On fail returns (None, None). :param str|unicode tree_alias: :param Context context: :rtype: tuple
def views_show_many(self, ids=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/views#list-views-by-id"
    api_path = "/api/v2/views/show_many.json"
    api_query = {}
    if "query" in kwargs.keys():
        api_query.update(kwargs["query"])
        del kwargs["query"]
    if ids:
        api_query.update({
            "ids": ids,
        })
    return self.call(api_path, query=api_query, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/views#list-views-by-id
def open_icmp_firewall(host):
    """Temporarily open the ICMP firewall. Tricks Windows into allowing
    ICMP packets for a short period of time (~ 1 minute)"""
    # We call ping with a timeout of 1ms: will return instantly
    with open(os.devnull, 'wb') as DEVNULL:
        return subprocess.Popen("ping -4 -w 1 -n 1 %s" % host,
                                shell=True,
                                stdout=DEVNULL,
                                stderr=DEVNULL).wait()
Temporarily open the ICMP firewall. Tricks Windows into allowing ICMP packets for a short period of time (~ 1 minute)
def load_and_parse(self): """Load and parse archives in a list of projects. Returns a dict that maps unique ids onto ParsedNodes""" archives = [] to_return = {} for name, project in self.all_projects.items(): archives = archives + self.parse_archives_from_project(project) # We're going to have a similar issue with parsed nodes, if we want to # make parse_node return those. for a in archives: # archives have a config, but that would make for an invalid # UnparsedNode, so remove it and pass it along to parse_node as an # argument. archive_config = a.pop('config') archive = UnparsedNode(**a) node_path = self.get_path(archive.resource_type, archive.package_name, archive.name) to_return[node_path] = self.parse_node( archive, node_path, self.all_projects.get(archive.package_name), archive_config=archive_config) return to_return
Load and parse archives in a list of projects. Returns a dict that maps unique ids onto ParsedNodes
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done."""
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    url = 'http://yann.lecun.com/exdb/mnist/' + filename + '.gz'
    _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
Download (and unzip) a file from the MNIST dataset if not already done.
def list_metrics(ctx):
    """List the available metrics."""
    config = ctx.obj["CONFIG"]
    if not exists(config):
        handle_no_cache(ctx)
    from wily.commands.list_metrics import list_metrics
    list_metrics()
List the available metrics.
def plot(self, origin=(0, 0), ax=None, fill=False, **kwargs):
    """
    Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance.

    Parameters
    ----------
    origin : array_like, optional
        The ``(x, y)`` position of the origin of the displayed image.
    ax : `matplotlib.axes.Axes` instance, optional
        If `None`, then the current `~matplotlib.axes.Axes` instance is used.
    fill : bool, optional
        Set whether to fill the aperture patch. The default is `False`.
    kwargs
        Any keyword arguments accepted by `matplotlib.patches.Patch`.
    """
    aper = self.to_aperture()
    aper.plot(origin=origin, ax=ax, fill=fill, **kwargs)
Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`.
def return_or_raise(cls, response, expected_status_code):
    """Check for ``expected_status_code``."""
    try:
        if response.status_code in expected_status_code:
            return response
    except TypeError:
        if response.status_code == expected_status_code:
            return response
    raise cls(response)
Check for ``expected_status_code``.
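A hedged, self-contained sketch of how this classmethod pattern is typically used: ``expected_status_code`` may be either a container of codes or a single int, and the TypeError fallback covers the scalar case. ``ApiError`` and ``FakeResponse`` are hypothetical stand-ins, not names from the original library.

class ApiError(Exception):
    def __init__(self, response):
        super().__init__("unexpected status %s" % response.status_code)
        self.response = response

    @classmethod
    def return_or_raise(cls, response, expected_status_code):
        """Check for ``expected_status_code``."""
        try:
            if response.status_code in expected_status_code:
                return response
        except TypeError:  # expected_status_code is a single int
            if response.status_code == expected_status_code:
                return response
        raise cls(response)

class FakeResponse:
    def __init__(self, status_code):
        self.status_code = status_code

ApiError.return_or_raise(FakeResponse(200), (200, 201))  # returns the response
ApiError.return_or_raise(FakeResponse(204), 204)         # scalar form also works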
def create(self, start_date, end_date, include_subaccounts=values.unset, status_callback=values.unset, status_callback_method=values.unset): """ Create a new FeedbackSummaryInstance :param date start_date: Only include feedback given on or after this date :param date end_date: Only include feedback given on or before this date :param bool include_subaccounts: `true` includes feedback from the specified account and its subaccounts :param unicode status_callback: The URL that we will request when the feedback summary is complete :param unicode status_callback_method: The HTTP method we use to make requests to the StatusCallback URL :returns: Newly created FeedbackSummaryInstance :rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance """ data = values.of({ 'StartDate': serialize.iso8601_date(start_date), 'EndDate': serialize.iso8601_date(end_date), 'IncludeSubaccounts': include_subaccounts, 'StatusCallback': status_callback, 'StatusCallbackMethod': status_callback_method, }) payload = self._version.create( 'POST', self._uri, data=data, ) return FeedbackSummaryInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Create a new FeedbackSummaryInstance :param date start_date: Only include feedback given on or after this date :param date end_date: Only include feedback given on or before this date :param bool include_subaccounts: `true` includes feedback from the specified account and its subaccounts :param unicode status_callback: The URL that we will request when the feedback summary is complete :param unicode status_callback_method: The HTTP method we use to make requests to the StatusCallback URL :returns: Newly created FeedbackSummaryInstance :rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance
def iter_columns(condition): """ Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths. """ # Like iter_conditions, this can't live in each condition without going possibly infinite on the # recursion, or passing the visited set through every call. That makes the signature ugly, so we # take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the # actual columns. visited = set() for condition in iter_conditions(condition): if condition.operation in ("and", "or", "not"): continue # Non-meta conditions always have a column, and each of values has the potential to be a column. # Comparison will only have a list of len 1, but it's simpler to just iterate values and check each # unwrap proxies created for paths column = proxied(condition.column) # special case for None # this could also have skipped on isinstance(condition, Condition) # but this is slightly more flexible for users to create their own None-sentinel Conditions if column is None: continue if column not in visited: visited.add(column) yield column for value in condition.values: if isinstance(value, ComparisonMixin): if value not in visited: visited.add(value) yield value
Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths.
def get_collection(self, name):
    '''get a collection, if it exists, otherwise return None.
    '''
    from sregistry.database.models import Collection
    return Collection.query.filter(Collection.name == name).first()
get a collection, if it exists, otherwise return None.
def GetRootKey(self):
    """Retrieves the root key.

    Returns:
        WinRegistryKey: Windows Registry root key or None if not available.
    """
    regf_key = self._regf_file.get_root_key()
    if not regf_key:
        return None
    return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix)
Retrieves the root key. Returns: WinRegistryKey: Windows Registry root key or None if not available.
async def request(self, method, url=None, *, path='', retries=1, connection_timeout=60, **kwargs): ''' This is the template for all of the `http method` methods for the Session. Args: method (str): A http method, such as 'GET' or 'POST'. url (str): The url the request should be made to. path (str): An optional kw-arg for use in Session method calls, for specifying a particular path. Usually to be used in conjunction with the base_location/endpoint paradigm. kwargs: Any number of the following: data (dict or str): Info to be processed as a body-bound query. params (dict or str): Info to be processed as a url-bound query. headers (dict): User HTTP headers to be used in the request. encoding (str): The str representation of the codec to process the request under. json (dict): A dict to be formatted as json and sent in the request body. files (dict): A dict of `filename:filepath`s to be sent as multipart. cookies (dict): A dict of `name:value` cookies to be passed in request. callback (func): A callback function to be called on each bytechunk of of the response body. timeout (int or float): A numeric representation of the longest time to wait on a complete response once a request has been sent. retries (int): The number of attempts to try against connection errors. max_redirects (int): The maximum number of redirects allowed. persist_cookies (True or None): Passing True instantiates a CookieTracker object to manage the return of cookies to the server under the relevant domains. auth (child of AuthBase): An object for handling auth construction. When you call something like Session.get() or asks.post(), you're really calling a partial method that has the 'method' argument pre-completed. ''' timeout = kwargs.get('timeout', None) req_headers = kwargs.pop('headers', None) if self.headers is not None: headers = copy(self.headers) if req_headers is not None: headers.update(req_headers) req_headers = headers async with self.sema: if url is None: url = self._make_url() + path retry = False sock = None try: sock = await timeout_manager( connection_timeout, self._grab_connection, url) port = sock.port req_obj = RequestProcessor( self, method, url, port, headers=req_headers, encoding=self.encoding, sock=sock, persist_cookies=self._cookie_tracker, **kwargs ) try: if timeout is None: sock, r = await req_obj.make_request() else: sock, r = await timeout_manager(timeout, req_obj.make_request) except BadHttpResponse: if timeout is None: sock, r = await req_obj.make_request() else: sock, r = await timeout_manager(timeout, req_obj.make_request) if sock is not None: try: if r.headers['connection'].lower() == 'close': sock._active = False await sock.close() except KeyError: pass await self.return_to_pool(sock) # ConnectionErrors are special. They are the only kind of exception # we ever want to suppress. All other exceptions are re-raised or # raised through another exception. except ConnectionError as e: if retries > 0: retry = True retries -= 1 else: raise e except Exception as e: if sock: await self._handle_exception(e, sock) raise # any BaseException is considered unlawful murder, and # Session.cleanup should be called to tidy up sockets. except BaseException as e: if sock: await sock.close() raise e if retry: return (await self.request(method, url, path=path, retries=retries, headers=headers, **kwargs)) return r
This is the template for all of the `http method` methods for the Session. Args: method (str): A http method, such as 'GET' or 'POST'. url (str): The url the request should be made to. path (str): An optional kw-arg for use in Session method calls, for specifying a particular path. Usually to be used in conjunction with the base_location/endpoint paradigm. kwargs: Any number of the following: data (dict or str): Info to be processed as a body-bound query. params (dict or str): Info to be processed as a url-bound query. headers (dict): User HTTP headers to be used in the request. encoding (str): The str representation of the codec to process the request under. json (dict): A dict to be formatted as json and sent in the request body. files (dict): A dict of `filename:filepath`s to be sent as multipart. cookies (dict): A dict of `name:value` cookies to be passed in request. callback (func): A callback function to be called on each bytechunk of of the response body. timeout (int or float): A numeric representation of the longest time to wait on a complete response once a request has been sent. retries (int): The number of attempts to try against connection errors. max_redirects (int): The maximum number of redirects allowed. persist_cookies (True or None): Passing True instantiates a CookieTracker object to manage the return of cookies to the server under the relevant domains. auth (child of AuthBase): An object for handling auth construction. When you call something like Session.get() or asks.post(), you're really calling a partial method that has the 'method' argument pre-completed.
def get_pgid(path, follow_symlinks=True): ''' Return the id of the primary group that owns a given file (Windows only) This function will return the rarely used primary group of a file. This generally has no bearing on permissions unless intentionally configured and is most commonly used to provide Unix compatibility (e.g. Services For Unix, NFS services). Ensure you know what you are doing before using this function. Args: path (str): The path to the file or directory follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: str: The gid of the primary group CLI Example: .. code-block:: bash salt '*' file.get_pgid c:\\temp\\test.txt ''' if not os.path.exists(path): raise CommandExecutionError('Path not found: {0}'.format(path)) # Under Windows, if the path is a symlink, the user that owns the symlink is # returned, not the user that owns the file/directory the symlink is # pointing to. This behavior is *different* to *nix, therefore the symlink # is first resolved manually if necessary. Remember symlinks are only # supported on Windows Vista or later. if follow_symlinks and sys.getwindowsversion().major >= 6: path = _resolve_symlink(path) group_name = salt.utils.win_dacl.get_primary_group(path) return salt.utils.win_dacl.get_sid_string(group_name)
Return the id of the primary group that owns a given file (Windows only) This function will return the rarely used primary group of a file. This generally has no bearing on permissions unless intentionally configured and is most commonly used to provide Unix compatibility (e.g. Services For Unix, NFS services). Ensure you know what you are doing before using this function. Args: path (str): The path to the file or directory follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: str: The gid of the primary group CLI Example: .. code-block:: bash salt '*' file.get_pgid c:\\temp\\test.txt
def execute(self, args):
    """Execute a registered hook based on args[0]"""
    _run_atstart()
    hook_name = os.path.basename(args[0])
    if hook_name in self._hooks:
        try:
            self._hooks[hook_name]()
        except SystemExit as x:
            if x.code is None or x.code == 0:
                _run_atexit()
            raise
        _run_atexit()
    else:
        raise UnregisteredHookError(hook_name)
Execute a registered hook based on args[0]
def get_graph_data(self, graph, benchmark): """ Iterator over graph data sets Yields ------ param_idx Flat index to parameter permutations for parameterized benchmarks. None if benchmark is not parameterized. entry_name Name for the data set. If benchmark is non-parameterized, this is the benchmark name. steps Steps to consider in regression detection. threshold User-specified threshold for regression detection. """ if benchmark.get('params'): param_iter = enumerate(zip(itertools.product(*benchmark['params']), graph.get_steps())) else: param_iter = [(None, (None, graph.get_steps()))] for j, (param, steps) in param_iter: if param is None: entry_name = benchmark['name'] else: entry_name = benchmark['name'] + '({0})'.format(', '.join(param)) start_revision = self._get_start_revision(graph, benchmark, entry_name) threshold = self._get_threshold(graph, benchmark, entry_name) if start_revision is None: # Skip detection continue steps = [step for step in steps if step[1] >= start_revision] yield j, entry_name, steps, threshold
Iterator over graph data sets Yields ------ param_idx Flat index to parameter permutations for parameterized benchmarks. None if benchmark is not parameterized. entry_name Name for the data set. If benchmark is non-parameterized, this is the benchmark name. steps Steps to consider in regression detection. threshold User-specified threshold for regression detection.
def pop_all(self, priority=None):
    """
    NON-BLOCKING POP ALL IN QUEUE, IF ANY
    """
    output = []
    with self.lock:
        if not priority:
            priority = self.highest_entry()
        if priority:
            output = list(self.queue[priority].queue)
            self.queue[priority].queue.clear()
    return output
NON-BLOCKING POP ALL IN QUEUE, IF ANY
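An illustrative, self-contained sketch of the same pattern (drain one priority level non-blockingly under a lock). The class layout used here, a dict of deques guarded by a single Lock, is an assumption for the sketch and not the original structure.

from collections import deque
from threading import Lock

class PriorityBuckets:
    def __init__(self):
        self.lock = Lock()
        self.queue = {}  # priority -> deque of items

    def push(self, priority, item):
        with self.lock:
            self.queue.setdefault(priority, deque()).append(item)

    def highest_entry(self):
        non_empty = [p for p, q in self.queue.items() if q]
        return max(non_empty) if non_empty else None

    def pop_all(self, priority=None):
        output = []
        with self.lock:
            if not priority:
                priority = self.highest_entry()
            if priority:
                output = list(self.queue[priority])
                self.queue[priority].clear()
        return output

b = PriorityBuckets()
b.push(1, 'low'); b.push(5, 'high')
print(b.pop_all())   # ['high'] -- drains the highest non-empty priority
print(b.pop_all(1))  # ['low']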
def create_lti_session(self, user_id, roles, realname, email, course_id, task_id, consumer_key, outcome_service_url, outcome_result_id, tool_name, tool_desc, tool_url, context_title, context_label): """ Creates an LTI cookieless session. Returns the new session id""" self._destroy_session() # don't forget to destroy the current session (cleans the threaded dict from web.py) self._session.load('') # creates a new cookieless session session_id = self._session.session_id self._session.lti = { "email": email, "username": user_id, "realname": realname, "roles": roles, "task": (course_id, task_id), "outcome_service_url": outcome_service_url, "outcome_result_id": outcome_result_id, "consumer_key": consumer_key, "context_title": context_title, "context_label": context_label, "tool_description": tool_desc, "tool_name": tool_name, "tool_url": tool_url } return session_id
Creates an LTI cookieless session. Returns the new session id
def _SetColour(self, value_list):
    """Sets row's colour attributes to a list of values in terminal.SGR."""
    if value_list is None:
        self._color = None
        return
    colors = []
    for color in value_list:
        if color in terminal.SGR:
            colors.append(color)
        elif color in terminal.FG_COLOR_WORDS:
            colors += terminal.FG_COLOR_WORDS[color]
        elif color in terminal.BG_COLOR_WORDS:
            colors += terminal.BG_COLOR_WORDS[color]
        else:
            raise ValueError("Invalid colour specification.")
    self._color = list(set(colors))
Sets row's colour attributes to a list of values in terminal.SGR.
def n_chunks(self):
    """ rough estimate of how many chunks will be processed """
    return self._data_source.n_chunks(self.chunksize, stride=self.stride, skip=self.skip)
rough estimate of how many chunks will be processed
def equivalence_transform(compound, from_positions, to_positions, add_bond=True): """Computes an affine transformation that maps the from_positions to the respective to_positions, and applies this transformation to the compound. Parameters ---------- compound : mb.Compound The Compound to be transformed. from_positions : np.ndarray, shape=(n, 3), dtype=float Original positions. to_positions : np.ndarray, shape=(n, 3), dtype=float New positions. """ warn('The `equivalence_transform` function is being phased out in favor of' ' `force_overlap`.', DeprecationWarning) from mbuild.port import Port T = None if isinstance(from_positions, (list, tuple)) and isinstance(to_positions, (list, tuple)): equivalence_pairs = zip(from_positions, to_positions) elif isinstance(from_positions, Port) and isinstance(to_positions, Port): equivalence_pairs, T = _choose_correct_port(from_positions, to_positions) from_positions.used = True to_positions.used = True else: equivalence_pairs = [(from_positions, to_positions)] if not T: T = _create_equivalence_transform(equivalence_pairs) atom_positions = compound.xyz_with_ports atom_positions = T.apply_to(atom_positions) compound.xyz_with_ports = atom_positions if add_bond: if isinstance(from_positions, Port) and isinstance(to_positions, Port): if not from_positions.anchor or not to_positions.anchor: # TODO: I think warnings is undefined here warn("Attempting to form bond from port that has no anchor") else: from_positions.anchor.parent.add_bond((from_positions.anchor, to_positions.anchor)) to_positions.anchor.parent.add_bond((from_positions.anchor, to_positions.anchor))
Computes an affine transformation that maps the from_positions to the respective to_positions, and applies this transformation to the compound. Parameters ---------- compound : mb.Compound The Compound to be transformed. from_positions : np.ndarray, shape=(n, 3), dtype=float Original positions. to_positions : np.ndarray, shape=(n, 3), dtype=float New positions.
def _get_all_files(filename_regex, path, base_dir, excluded_paths=None, excluded_filename_regex=None): """Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well. """ # For windows def replace_backslashes(string): return string.replace('\\', '/') excluded_paths = _normalize_excluded_paths(base_dir, excluded_paths) if excluded_paths: logger.info('Excluding paths: %s', excluded_paths) logger.info('Looking for %s under %s...', filename_regex, os.path.join(base_dir, path)) if excluded_filename_regex: logger.info('Excluding file names: %s', excluded_filename_regex) path_expression = re.compile(replace_backslashes(path)) target_files = [] for root, _, files in os.walk(base_dir): if not root.startswith(tuple(excluded_paths)) \ and path_expression.search(replace_backslashes(root)): for filename in files: filepath = os.path.join(root, filename) is_file, matched, excluded_filename, excluded_path = \ _set_match_parameters( filename, filepath, filename_regex, excluded_filename_regex, excluded_paths) if is_file and matched and not excluded_filename \ and not excluded_path: logger.debug('%s is a match. Appending to list...', filepath) target_files.append(filepath) return target_files
Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well.
def make_function_value_private(self, value, value_type, function): """ Wraps converted value so that it is hidden in logs etc. Note this is not secure just reduces leaking info Allows base 64 encode stuff using base64() or plain hide() in the config """ # remove quotes value = self.remove_quotes(value) if function == "base64": try: import base64 value = base64.b64decode(value).decode("utf-8") except TypeError as e: self.notify_user("base64(..) error %s" % str(e)) # check we are in a module definition etc if not self.current_module: self.notify_user("%s(..) used outside of module or section" % function) return None module = self.current_module[-1].split()[0] if module in CONFIG_FILE_SPECIAL_SECTIONS + I3S_MODULE_NAMES: self.notify_user( "%s(..) cannot be used outside of py3status module " "configuration" % function ) return None value = self.value_convert(value, value_type) module_name = self.current_module[-1] return PrivateHide(value, module_name)
Wraps converted value so that it is hidden in logs etc. Note this is not secure; it just reduces information leaking. Allows values to be base64-encoded using base64() or hidden as plain text using hide() in the config
def write(self, h, txt='', link=''): "Output text in flowing mode" txt = self.normalize_text(txt) cw=self.current_font['cw'] w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size s=txt.replace("\r",'') nb=len(s) sep=-1 i=0 j=0 l=0 nl=1 while(i<nb): #Get next character c=s[i] if(c=="\n"): #Explicit line break self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) i+=1 sep=-1 j=i l=0 if(nl==1): self.x=self.l_margin w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size nl+=1 continue if(c==' '): sep=i if self.unifontsubset: l += self.get_string_width(c) / self.font_size*1000.0 else: l += cw.get(c,0) if(l>wmax): #Automatic line break if(sep==-1): if(self.x>self.l_margin): #Move to next line self.x=self.l_margin self.y+=h w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size i+=1 nl+=1 continue if(i==j): i+=1 self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) else: self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link) i=sep+1 sep=-1 j=i l=0 if(nl==1): self.x=self.l_margin w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size nl+=1 else: i+=1 #Last chunk if(i!=j): self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
Output text in flowing mode
def unique_iter(src, key=None): """Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye'] """ if not is_iterable(src): raise TypeError('expected an iterable, not %r' % type(src)) if key is None: key_func = lambda x: x elif callable(key): key_func = key elif isinstance(key, basestring): key_func = lambda x: getattr(x, key, x) else: raise TypeError('"key" expected a string or callable, not %r' % key) seen = set() for i in src: k = key_func(i) if k not in seen: seen.add(k) yield i return
Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye']
def to_boulderio(infile, outfile):
    '''Converts input sequence file into a "Boulder-IO format", as used by primer3'''
    seq_reader = sequences.file_reader(infile)
    f_out = utils.open_file_write(outfile)
    for sequence in seq_reader:
        print("SEQUENCE_ID=" + sequence.id, file=f_out)
        print("SEQUENCE_TEMPLATE=" + sequence.seq, file=f_out)
        print("=", file=f_out)
    utils.close(f_out)
Converts input sequence file into a "Boulder-IO format", as used by primer3
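The Boulder-IO records emitted above are plain KEY=VALUE lines, with each record terminated by a line containing only "=". A dependency-free sketch of the same output, using a minimal FASTA reader in place of pyfastaq's sequences.file_reader; the reader here is a simplified stand-in, not the library's implementation.

def fasta_records(path):
    # Very small FASTA reader: yields (id, sequence) pairs.
    name, seq = None, []
    with open(path) as f:
        for line in f:
            line = line.rstrip()
            if line.startswith('>'):
                if name is not None:
                    yield name, ''.join(seq)
                name, seq = line[1:].split()[0], []
            else:
                seq.append(line)
    if name is not None:
        yield name, ''.join(seq)

def to_boulderio_sketch(infile, outfile):
    with open(outfile, 'w') as f_out:
        for seq_id, seq in fasta_records(infile):
            print("SEQUENCE_ID=" + seq_id, file=f_out)
            print("SEQUENCE_TEMPLATE=" + seq, file=f_out)
            print("=", file=f_out)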
def _set_cluster(self, v, load=False): """ Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container) If this variable is read-only (config: false) in the source YANG file, then _set_cluster is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cluster() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """cluster must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=cluster.cluster, is_container='container', presence=False, yang_name="cluster", rest_name="cluster", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cluster Management virtual IP Configuration', u'cli-incomplete-no': None}}, namespace='http://brocade.com/ns/brocade-cluster', defining_module='brocade-cluster', yang_type='container', is_config=True)""", }) self.__cluster = t if hasattr(self, '_set'): self._set()
Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container) If this variable is read-only (config: false) in the source YANG file, then _set_cluster is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cluster() directly.
def optional(name, default) -> 'Wildcard': """Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: A n optional wildcard. """ return Wildcard(min_count=1, fixed_size=True, variable_name=name, optional=default)
Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: An optional wildcard.
def train(self, jsondocs, model_dir): """ Train a NER model using given documents. Each word in the documents must have a "label" attribute, which denote the named entities in the documents. Parameters ---------- jsondocs: list of JSON-style documents. The documents used for training the CRF model. model_dir: str A directory where the model will be saved. """ modelUtil = ModelStorageUtil(model_dir) modelUtil.makedir() modelUtil.copy_settings(self.settings) # Convert json documents to ner documents nerdocs = [json_document_to_estner_document(jsondoc) for jsondoc in jsondocs] self.fex.prepare(nerdocs) self.fex.process(nerdocs) self.trainer.train(nerdocs, modelUtil.model_filename)
Train a NER model using given documents. Each word in the documents must have a "label" attribute, which denotes the named entities in the documents. Parameters ---------- jsondocs: list of JSON-style documents. The documents used for training the CRF model. model_dir: str A directory where the model will be saved.
def extend_peaks(self, prop_thresh=50): """Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks. """ # octave propagation of the reference peaks temp_peaks = [i + 1200 for i in self.peaks["peaks"][0]] temp_peaks.extend([i - 1200 for i in self.peaks["peaks"][0]]) extended_peaks = [] extended_peaks.extend(self.peaks["peaks"][0]) for i in temp_peaks: # if a peak exists around, don't add this new one. nearest_ind = slope.find_nearest_index(self.peaks["peaks"][0], i) diff = abs(self.peaks["peaks"][0][nearest_ind] - i) diff = np.mod(diff, 1200) if diff > prop_thresh: extended_peaks.append(i) return extended_peaks
Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks.
def mach2tas(Mach, H):
    """Mach number to True Airspeed"""
    a = vsound(H)
    Vtas = Mach*a
    return Vtas
Mach number to True Airspeed
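A self-contained sketch of the conversion under an assumed ISA troposphere model for the speed of sound, since the vsound() helper is defined elsewhere.

import math

# Sketch of the Mach -> TAS conversion, assuming an ISA troposphere model
# for the speed of sound (valid up to roughly 11 km).
GAMMA = 1.4      # ratio of specific heats for air
R = 287.05       # specific gas constant for dry air, J/(kg*K)

def isa_speed_of_sound(H):
    """Speed of sound (m/s) at geopotential altitude H (m), ISA troposphere."""
    T = 288.15 - 0.0065 * H   # temperature with the standard lapse rate
    return math.sqrt(GAMMA * R * T)

# Mach 0.8 at 10,000 m -> roughly 240 m/s true airspeed
print(0.8 * isa_speed_of_sound(10000.0))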
def deploy(verbose, app): """Deploy app using Heroku to MTurk.""" # Load psiTurk configuration. config = PsiturkConfig() config.load_config() # Set the mode. config.set("Experiment Configuration", "mode", "deploy") config.set("Server Parameters", "logfile", "-") # Ensure that psiTurk is not in sandbox mode. config.set("Shell Parameters", "launch_in_sandbox_mode", "false") # Do shared setup. deploy_sandbox_shared_setup(verbose=verbose, app=app)
Deploy app using Heroku to MTurk.
def reduce(self, dimensions=[], function=None, spreadfn=None, **reductions): """Applies reduction along the specified dimension(s). Allows reducing the values along one or more key dimension with the supplied function. Supports two signatures: Reducing with a list of dimensions, e.g.: ds.reduce(['x'], np.mean) Defining a reduction using keywords, e.g.: ds.reduce(x=np.mean) Args: dimensions: Dimension(s) to apply reduction on Defaults to all key dimensions function: Reduction operation to apply, e.g. numpy.mean spreadfn: Secondary reduction to compute value spread Useful for computing a confidence interval, spread, or standard deviation. **reductions: Keyword argument defining reduction Allows reduction to be defined as keyword pair of dimension and function Returns: The Dataset after reductions have been applied. """ if any(dim in self.vdims for dim in dimensions): raise Exception("Reduce cannot be applied to value dimensions") function, dims = self._reduce_map(dimensions, function, reductions) dims = [d for d in self.kdims if d not in dims] return self.aggregate(dims, function, spreadfn)
Applies reduction along the specified dimension(s). Allows reducing the values along one or more key dimension with the supplied function. Supports two signatures: Reducing with a list of dimensions, e.g.: ds.reduce(['x'], np.mean) Defining a reduction using keywords, e.g.: ds.reduce(x=np.mean) Args: dimensions: Dimension(s) to apply reduction on Defaults to all key dimensions function: Reduction operation to apply, e.g. numpy.mean spreadfn: Secondary reduction to compute value spread Useful for computing a confidence interval, spread, or standard deviation. **reductions: Keyword argument defining reduction Allows reduction to be defined as keyword pair of dimension and function Returns: The Dataset after reductions have been applied.
def all_files(models=[]): r''' Return a list of full path of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except one variable component e.g. the list, test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb ''' def nsort(a, b): fa = os.path.basename(a).split('.') fb = os.path.basename(b).split('.') elements_to_remove = [] assert len(fa) == len(fb) for i in range(0, len(fa)): if fa[i] == fb[i]: elements_to_remove.append(fa[i]) for e in elements_to_remove: fa.remove(e) fb.remove(e) assert len(fa) == len(fb) assert len(fa) == 1 fa = keep_only_digits(fa[0]) fb = keep_only_digits(fb[0]) if fa < fb: return -1 if fa == fb: return 0 if fa > fb: return 1 base = list(map(lambda x: os.path.abspath(x), maybe_inspect_zip(models))) base.sort(cmp=nsort) return base
Return a list of full paths of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except for one variable component, e.g. the list, test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb
def verifyUpdatewcs(fname): """ Verify the existence of WCSNAME in the file. If it is not present, report this to the user and raise an exception. Returns True if WCSNAME was found in all SCI extensions. """ updated = True numsci,extname = count_sci_extensions(fname) for n in range(1,numsci+1): hdr = fits.getheader(fname, extname=extname, extver=n, memmap=False) if 'wcsname' not in hdr: updated = False break return updated
Verify the existence of WCSNAME in the file. Returns True if WCSNAME was found in all SCI extensions, False otherwise.
def make_action(self, fn, schema_parser, meta): """ Make resource's method an action Validate input, output by schema in meta. If no input schema, call fn without params. If no output schema, will not validate return value. Args: fn: resource's method schema_parser: for parsing schema in meta meta: meta data of the action """ validate_input = validate_output = None if "$input" in meta: with MarkKey("$input"): validate_input = schema_parser.parse(meta["$input"]) if "$output" in meta: with MarkKey("$output"): validate_output = schema_parser.parse(meta["$output"]) def action(data): if validate_input: try: data = validate_input(data) except Invalid as ex: return abort(400, "InvalidData", str(ex)) if isinstance(data, dict): rv = fn(**data) else: rv = fn(data) else: rv = fn() rv, status, headers = unpack(rv) if validate_output: try: rv = validate_output(rv) except Invalid as ex: return abort(500, "ServerError", str(ex)) return rv, status, headers return action
Make resource's method an action Validate input, output by schema in meta. If no input schema, call fn without params. If no output schema, will not validate return value. Args: fn: resource's method schema_parser: for parsing schema in meta meta: meta data of the action
def get_platform(): """Get the current platform data. Returns a dictionary with keys: `os_name`, `os_bits` """ platform_data = { 'os_name': None, 'os_bits': None } os_name = platform.system() normalize_os = { 'Windows': 'windows', 'Linux': 'linux', 'Darwin': 'mac' } if os_name in normalize_os.keys(): platform_data['os_name'] = normalize_os[os_name] else: raise Exception('Could not normalize os name {}'.format(os_name)) # try to get the os bits maxsize = sys.maxsize if maxsize == EXPECTED_MAXSIZE_32: platform_data['os_bits'] = '32' elif maxsize == EXPECTED_MAXSIZE_64: platform_data['os_bits'] = '64' else: platform_data['os_bits'] = '64' logger.warning('could not determine os bits, setting default to 64') return platform_data
Get the current platform data. Returns a dictionary with keys: `os_name`, `os_bits`
def index(self, value, start=0, end=None): """Return the index of value between start and end. By default, the entire setlist is searched. This runs in O(1) Args: value: The value to find the index of start (int): The index to start searching at (defaults to 0) end (int): The index to stop searching at (defaults to the end of the list) Returns: int: The index of the value Raises: ValueError: If the value is not in the list or outside of start - end IndexError: If start or end are out of range """ try: index = self._dict[value] except KeyError: raise ValueError else: start = self._fix_neg_index(start) end = self._fix_end_index(end) if start <= index and index < end: return index else: raise ValueError
Return the index of value between start and end. By default, the entire setlist is searched. This runs in O(1) Args: value: The value to find the index of start (int): The index to start searching at (defaults to 0) end (int): The index to stop searching at (defaults to the end of the list) Returns: int: The index of the value Raises: ValueError: If the value is not in the list or outside of start - end IndexError: If start or end are out of range
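A rough standalone sketch of why the lookup can be O(1): keep a value-to-position dict in sync with the list. TinySetList is a toy class for illustration and omits the start/end bounds handling described above.

# Minimal sketch of the idea behind the O(1) index lookup: a value -> position
# dict maintained alongside the underlying list.
class TinySetList:
    def __init__(self, values):
        self._list = []
        self._dict = {}
        for v in values:
            if v not in self._dict:
                self._dict[v] = len(self._list)
                self._list.append(v)

    def index(self, value):
        try:
            return self._dict[value]
        except KeyError:
            raise ValueError('%r is not in the setlist' % (value,))

sl = TinySetList(['a', 'b', 'c', 'b'])
print(sl.index('c'))  # 2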
def fromDataFrameRDD(cls, rdd, sql_ctx):
    """Construct a DataFrame from an RDD of DataFrames. No checking or validation occurs."""
    result = DataFrame(None, sql_ctx)
    return result.from_rdd_of_dataframes(rdd)
Construct a DataFrame from an RDD of DataFrames. No checking or validation occurs.
def load_vm_uuid_by_name(self, si, vcenter_data_model, vm_name): """ Returns the vm uuid :param si: Service instance to the vcenter :param vcenter_data_model: vcenter data model :param vm_name: the vm name :return: str uuid """ path = VMLocation.combine([vcenter_data_model.default_datacenter, vm_name]) paths = path.split('/') name = paths[len(paths) - 1] path = VMLocation.combine(paths[:len(paths) - 1]) vm = self.pv_service.find_vm_by_name(si, path, name) if not vm: raise ValueError('Could not find the vm in the given path: {0}/{1}'.format(path, name)) if isinstance(vm, vim.VirtualMachine): return vm.config.uuid raise ValueError('The given object is not a vm: {0}/{1}'.format(path, name))
Returns the vm uuid :param si: Service instance to the vcenter :param vcenter_data_model: vcenter data model :param vm_name: the vm name :return: str uuid
def do_wordwrap(s, width=79, break_long_words=True): """ Return a copy of the string passed to the filter wrapped after ``79`` characters. You can override this default using the first parameter. If you set the second parameter to `false` Jinja will not split words apart if they are longer than `width`. """ import textwrap return u'\n'.join(textwrap.wrap(s, width=width, expand_tabs=False, replace_whitespace=False, break_long_words=break_long_words))
Return a copy of the string passed to the filter wrapped after ``79`` characters. You can override this default using the first parameter. If you set the second parameter to `false` Jinja will not split words apart if they are longer than `width`.
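An equivalent standalone call to textwrap with the same keyword choices, shown here with break_long_words disabled; the sample text is made up.

import textwrap

text = ("The quick brown fox jumps over the lazy dog, "
        "then naps in the warm afternoon sun.")

# Wrap at 30 characters without splitting words longer than the width.
wrapped = '\n'.join(textwrap.wrap(text, width=30,
                                  expand_tabs=False,
                                  replace_whitespace=False,
                                  break_long_words=False))
print(wrapped)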
def upgrade_plan_list(self, subid, params=None): ''' /v1/server/upgrade_plan_list GET - account Retrieve a list of the VPSPLANIDs for which a virtual machine can be upgraded. An empty response array means that there are currently no upgrades available. Link: https://www.vultr.com/api/#server_upgrade_plan_list ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/upgrade_plan_list', params, 'GET')
/v1/server/upgrade_plan_list GET - account Retrieve a list of the VPSPLANIDs for which a virtual machine can be upgraded. An empty response array means that there are currently no upgrades available. Link: https://www.vultr.com/api/#server_upgrade_plan_list
def _GetDataStreams(self): """Retrieves the data streams. Returns: list[DataStream]: data streams. """ if self._data_streams is None: if self._directory is None: self._directory = self._GetDirectory() self._data_streams = [] # It is assumed that directory and link file entries typically # do not have data streams. if not self._directory and not self.link: data_stream = DataStream() self._data_streams.append(data_stream) return self._data_streams
Retrieves the data streams. Returns: list[DataStream]: data streams.
def set_options_values(self, options, parse=False, strict=False): """ Set the options from a dict of values (in string). :param option_values: the values of options (in format `{"opt_name": "new_value"}`) :type option_values: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `option_values` dict should only contains existing options (no other key) :type strict: bool """ if strict: for opt_name in options.keys(): if not self.has_option(opt_name): raise ValueError("'%s' is not a option of the component" % opt_name) elif self.option_is_hidden(opt_name): raise ValueError("'%s' is hidden, you can't set it" % opt_name) for opt_name, opt in self._options.items(): if opt.hidden: continue if opt_name in options: opt.set(options[opt_name], parse=parse)
Set the options from a dict of values (in string). :param options: the values of options (in format `{"opt_name": "new_value"}`) :type options: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `options` dict should only contain existing options (no other key) :type strict: bool
def write_reaction(self, value_dict): con = self.connection or self._connect() self._initialize(con) cur = con.cursor() ase_ids = value_dict['ase_ids'] energy_corrections = value_dict.get('energy_corrections', {}) key_list = get_key_list(start_index=1) values = [value_dict[key] for key in key_list] key_str = get_key_str('reaction', start_index=1) value_str = get_value_str(values) insert_command = \ """INSERT INTO reaction ({0}) VALUES ({1}) RETURNING id;"""\ .format(key_str, value_str) cur.execute(insert_command) id = cur.fetchone()[0] reaction_system_values = [] """ Write to reaction_system tables""" for name, ase_id in ase_ids.items(): if name in energy_corrections: energy_correction = energy_corrections[name] else: energy_correction = 0 reaction_system_values += [tuple([name, energy_correction, ase_id, id])] key_str = get_key_str('reaction_system') insert_command = """INSERT INTO reaction_system ({0}) VALUES %s ON CONFLICT DO NOTHING;""".format(key_str) execute_values(cur=cur, sql=insert_command, argslist=reaction_system_values, page_size=1000) if self.connection is None: con.commit() con.close() return id
Write to reaction_system tables
def kill_current_session(ctx: Context_T) -> None: """ Force kill current session of the given context, despite whether it is running or not. :param ctx: message context """ ctx_id = context_id(ctx) if ctx_id in _sessions: del _sessions[ctx_id]
Force kill current session of the given context, despite whether it is running or not. :param ctx: message context
def register_postparsing_hook(self, func: Callable[[plugin.PostparsingData], plugin.PostparsingData]) -> None:
    """Register a function to be called after parsing user input but before running the command"""
    self._validate_postparsing_callable(func)
    self._postparsing_hooks.append(func)
Register a function to be called after parsing user input but before running the command
def feed(self, pred, label): """ Args: pred (np.ndarray): binary array. label (np.ndarray): binary array of the same size. """ assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape) self.nr_pos += (label == 1).sum() self.nr_neg += (label == 0).sum() self.nr_pred_pos += (pred == 1).sum() self.nr_pred_neg += (pred == 0).sum() self.corr_pos += ((pred == 1) & (pred == label)).sum() self.corr_neg += ((pred == 0) & (pred == label)).sum()
Args: pred (np.ndarray): binary array. label (np.ndarray): binary array of the same size.
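The same counting logic illustrated on two small binary numpy arrays, with precision and recall derived from the counters; the sample arrays are made up for the example.

import numpy as np

# Standalone illustration of the same counting logic on two binary arrays.
pred  = np.array([1, 0, 1, 1, 0, 1])
label = np.array([1, 0, 0, 1, 1, 1])

nr_pos      = (label == 1).sum()                    # 4 positives in ground truth
nr_pred_pos = (pred == 1).sum()                     # 4 predicted positives
corr_pos    = ((pred == 1) & (pred == label)).sum() # 3 true positives

precision = corr_pos / nr_pred_pos
recall    = corr_pos / nr_pos
print(precision, recall)  # 0.75 0.75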
def policy_exists(policy_name, region=None, key=None, keyid=None, profile=None): ''' Check to see if policy exists. CLI Example: .. code-block:: bash salt myminion boto_iam.instance_profile_exists myiprofile ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.get_policy(_get_policy_arn(policy_name, region=region, key=key, keyid=keyid, profile=profile)) return True except boto.exception.BotoServerError: return False
Check to see if policy exists. CLI Example: .. code-block:: bash salt myminion boto_iam.policy_exists mypolicy
def fft_coefficient(self, x, param=None): """ As in tsfresh `fft_coefficient <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L852>`_ \ Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast \ fourier transformation algorithm .. math:: A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0, \\ldots , n-1. The resulting coefficients will be complex, this feature calculator can return the real part (attr=="real"), \ the imaginary part (attr=="imag), the absolute value (attr=""abs) and the angle in degrees (attr=="angle). :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag"\ , "abs", "angle"] :type param: list :return: the different feature values :rtype: pandas.Series """ if param is None: param = [{'attr': 'abs', 'coeff': 44}, {'attr': 'abs', 'coeff': 63}, {'attr': 'abs', 'coeff': 0}, {'attr': 'real', 'coeff': 0}, {'attr': 'real', 'coeff': 23}] _fft_coef = feature_calculators.fft_coefficient(x, param) logging.debug("fft coefficient by tsfresh calculated") return list(_fft_coef)
As in tsfresh `fft_coefficient <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L852>`_ \ Calculates the Fourier coefficients of the one-dimensional discrete Fourier Transform for real input by the fast \ Fourier transformation algorithm .. math:: A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0, \\ldots , n-1. The resulting coefficients will be complex; this feature calculator can return the real part (attr=="real"), \ the imaginary part (attr=="imag"), the absolute value (attr=="abs") and the angle in degrees (attr=="angle"). :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag"\ , "abs", "angle"] :type param: list :return: the different feature values :rtype: pandas.Series
def handle_stdin_request(self, timeout=0.1): """ Method to capture raw_input """ msg_rep = self.km.stdin_channel.get_msg(timeout=timeout) # in case any iopub came while we were waiting: self.handle_iopub() if self.session_id == msg_rep["parent_header"].get("session"): # wrap SIGINT handler real_handler = signal.getsignal(signal.SIGINT) def double_int(sig,frame): # call real handler (forwards sigint to kernel), # then raise local interrupt, stopping local raw_input real_handler(sig,frame) raise KeyboardInterrupt signal.signal(signal.SIGINT, double_int) try: raw_data = raw_input(msg_rep["content"]["prompt"]) except EOFError: # turn EOFError into EOF character raw_data = '\x04' except KeyboardInterrupt: sys.stdout.write('\n') return finally: # restore SIGINT handler signal.signal(signal.SIGINT, real_handler) # only send stdin reply if there *was not* another request # or execution finished while we were reading. if not (self.km.stdin_channel.msg_ready() or self.km.shell_channel.msg_ready()): self.km.stdin_channel.input(raw_data)
Method to capture raw_input
def modify_content(request, page_id, content_type, language_id): """Modify the content of a page.""" page = get_object_or_404(Page, pk=page_id) perm = request.user.has_perm('pages.change_page') if perm and request.method == 'POST': content = request.POST.get('content', False) if not content: raise Http404 page = Page.objects.get(pk=page_id) if settings.PAGE_CONTENT_REVISION: Content.objects.create_content_if_changed(page, language_id, content_type, content) else: Content.objects.set_or_create_content(page, language_id, content_type, content) page.invalidate() # to update last modification date page.save() return HttpResponse('ok') raise Http404
Modify the content of a page.
def query_by_postid(postid, limit=5):
    '''
    Query history of certain records.
    '''
    recs = TabPostHist.select().where(
        TabPostHist.post_id == postid
    ).order_by(
        TabPostHist.time_update.desc()
    ).limit(limit)
    return recs
Query history of certain records.
def dump_viewset(viewset_class, root_folder, folder_fn=lambda i: ".", sample_size=None): """ Dump the contents of a rest-api queryset to a folder structure. :param viewset_class: A rest-api viewset to iterate through :param root_folder: The root folder to write results to. :param folder_fn: A function to generate a subfolder name for the instance. :param sample_size: Number of items to process, for test purposes. :return: """ if os.path.exists(root_folder): shutil.rmtree(root_folder) os.makedirs(root_folder) vs = viewset_class() vs.request = rf.get('') serializer_class = vs.get_serializer_class() serializer = serializer_class(context={'request': vs.request, 'format': 'json', 'view': vs}) renderer = PrettyJSONRenderer() bar = progressbar.ProgressBar() for instance in bar(vs.get_queryset()[:sample_size]): dct = serializer.to_representation(instance) content = renderer.render(dct) folder = os.path.join(root_folder, folder_fn(instance)) if not os.path.exists(folder): os.makedirs(folder) filename = "%s.json" % instance.slug f = file(os.path.join(folder, filename), 'w') f.write(content) f.close()
Dump the contents of a rest-api queryset to a folder structure. :param viewset_class: A rest-api viewset to iterate through :param root_folder: The root folder to write results to. :param folder_fn: A function to generate a subfolder name for the instance. :param sample_size: Number of items to process, for test purposes. :return:
def sdiv(a, b): """Safe division: if a == b == 0, sdiv(a, b) == 1""" if len(a) != len(b): raise ValueError('Argument a and b does not have the same length') idx = 0 ret = matrix(0, (len(a), 1), 'd') for m, n in zip(a, b): try: ret[idx] = m / n except ZeroDivisionError: ret[idx] = 1 finally: idx += 1 return ret
Safe division: if a == b == 0, sdiv(a, b) == 1
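A plain-list sketch of the same safe-division idea, avoiding the cvxopt matrix type used by the original.

# Element-wise division that maps 0/0 to 1 instead of raising.
def safe_div(a, b):
    if len(a) != len(b):
        raise ValueError('Argument a and b does not have the same length')
    out = []
    for m, n in zip(a, b):
        try:
            out.append(m / n)
        except ZeroDivisionError:
            out.append(1)
    return out

print(safe_div([2.0, 0.0, 9.0], [4.0, 0.0, 3.0]))  # [0.5, 1, 3.0]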
def stepper_step(self, motor_speed, number_of_steps): """ Move a stepper motor for the number of steps at the specified speed This is a FirmataPlus feature. :param motor_speed: 21 bits of data to set motor speed :param number_of_steps: 14 bits for number of steps & direction positive is forward, negative is reverse """ task = asyncio.ensure_future(self.core.stepper_step(motor_speed, number_of_steps)) self.loop.run_until_complete(task)
Move a stepper motor for the number of steps at the specified speed This is a FirmataPlus feature. :param motor_speed: 21 bits of data to set motor speed :param number_of_steps: 14 bits for number of steps & direction positive is forward, negative is reverse
def absolute(requestContext, seriesList): """ Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy) """ for series in seriesList: series.name = "absolute(%s)" % (series.name) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeAbs(value) return seriesList
Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy)
def _create_gates(self, inputs, memory): """Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate. """ # We'll create the input and forget gates at once. Hence, calculate double # the gate size. num_gates = 2 * self._calculate_gate_size() memory = tf.tanh(memory) inputs = basic.BatchFlatten()(inputs) gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs) gate_inputs = tf.expand_dims(gate_inputs, axis=1) gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory) gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2) input_gate, forget_gate = gates input_gate = tf.sigmoid(input_gate + self._input_bias) forget_gate = tf.sigmoid(forget_gate + self._forget_bias) return input_gate, forget_gate
Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate.
def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100, deltaPix=0.05, subgrid_res=5, center_x=0, center_y=0): """ takes a lens light model and turns it numerically in a lens model (with all lensmodel quantities computed on a grid). Then provides an interpolated grid for the quantities. :param kwargs_lens_light: lens light keyword argument list :param numPix: number of pixels per axis for the return interpolation :param deltaPix: interpolation/pixel size :param center_x: center of the grid :param center_y: center of the grid :param subgrid: subgrid for the numerical integrals :return: """ # make sugrid x_grid_sub, y_grid_sub = util.make_grid(numPix=numPix*5, deltapix=deltaPix, subgrid_res=subgrid_res) import lenstronomy.Util.mask as mask_util mask = mask_util.mask_sphere(x_grid_sub, y_grid_sub, center_x, center_y, r=1) x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) # compute light on the subgrid lightModel = LightModel(light_model_list=lens_light_model_list) flux = lightModel.surface_brightness(x_grid_sub, y_grid_sub, kwargs_lens_light) flux_norm = np.sum(flux[mask == 1]) / np.sum(mask) flux /= flux_norm from lenstronomy.LensModel.numerical_profile_integrals import ConvergenceIntegrals integral = ConvergenceIntegrals() # compute lensing quantities with subgrid convergence_sub = flux f_x_sub, f_y_sub = integral.deflection_from_kappa(convergence_sub, x_grid_sub, y_grid_sub, deltaPix=deltaPix/float(subgrid_res)) f_sub = integral.potential_from_kappa(convergence_sub, x_grid_sub, y_grid_sub, deltaPix=deltaPix/float(subgrid_res)) # interpolation function on lensing quantities x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub) from lenstronomy.LensModel.Profiles.interpol import Interpol interp_func = Interpol() interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub) # compute lensing quantities on sparser grid x_axes, y_axes = util.get_axes(x_grid, y_grid) f_ = interp_func.function(x_grid, y_grid) f_x, f_y = interp_func.derivatives(x_grid, y_grid) # numerical differentials for second order differentials from lenstronomy.LensModel.numeric_lens_differentials import NumericLens lens_differential = NumericLens(lens_model_list=['INTERPOL']) kwargs = [{'grid_interp_x': x_axes_sub, 'grid_interp_y': y_axes_sub, 'f_': f_sub, 'f_x': f_x_sub, 'f_y': f_y_sub}] f_xx, f_xy, f_yx, f_yy = lens_differential.hessian(x_grid, y_grid, kwargs) kwargs_interpol = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes, 'f_': util.array2image(f_), 'f_x': util.array2image(f_x), 'f_y': util.array2image(f_y), 'f_xx': util.array2image(f_xx), 'f_xy': util.array2image(f_xy), 'f_yy': util.array2image(f_yy)} return kwargs_interpol
takes a lens light model and turns it numerically into a lens model (with all lensmodel quantities computed on a grid). Then provides an interpolated grid for the quantities. :param lens_light_model_list: list of lens light model names :param kwargs_lens_light: lens light keyword argument list :param numPix: number of pixels per axis for the return interpolation :param deltaPix: interpolation/pixel size :param center_x: center of the grid :param center_y: center of the grid :param subgrid_res: subgrid resolution for the numerical integrals :return:
def get_queue(cls, name, priority=0, **fields_if_new): """ Get, or create, and return the wanted queue. If the queue is created, fields in fields_if_new will be set for the new queue. """ queue_kwargs = {'name': name, 'priority': priority} retries = 0 while retries < 10: retries += 1 try: queue, created = cls.get_or_connect(**queue_kwargs) except IndexError: # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP # => retry continue except ValueError: # more than one (race condition https://github.com/yohanboniface/redis-limpyd/issues/82 ?) try: queue = cls.collection(**queue_kwargs).instances()[0] except IndexError: # but no more now ?! # => retry continue else: created = False # ok we have our queue, stop now break if created and fields_if_new: queue.set_fields(**fields_if_new) return queue
Get, or create, and return the wanted queue. If the queue is created, fields in fields_if_new will be set for the new queue.
def destroy(self): """Close the connection, and close any associated CBS authentication session. """ try: self.lock() _logger.debug("Unlocked connection %r to close.", self.container_id) self._close() finally: self.release() uamqp._Platform.deinitialize()
Close the connection, and close any associated CBS authentication session.
def _fill_levenshtein_table(self, first, second, update_func, add_pred, clear_pred, threshold=None): """ Функция, динамически заполняющая таблицу costs стоимости трансдукций, costs[i][j] --- минимальная стоимость трансдукции, переводящей first[:i] в second[:j] Аргументы: ---------- first, second : string Верхний и нижний элементы трансдукции update_func : callable, float*float -> bool update_func(x, y) возвращает новое значение в ячейке таблицы costs, если старое значение --- y, а потенциально новое значение --- x везде update_func = min add_pred : callable : float*float -> bool add_pred(x, y) возвращает, производится ли добавление нового элемента p стоимости x в ячейку backtraces[i][j] в зависимости от значения costs[i][j]=y и текущей стоимости x clear_pred : callable : float*float -> bool clear_pred(x, y) возвращает, производится ли очистка ячейки backtraces[i][j] в зависимости от значения costs[i][j]=y и текущей стоимости x элемента p, добавляемого в эту ячейку Возвращает: ----------- costs : array, dtype=float, shape=(len(first)+1, len(second)+1) массив, в ячейке с индексами i, j которого хранится минимальная стоимость трансдукции, переводящей first[:i] в second[:j] backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1) массив, в ячейке с индексами i, j которого хранятся обратные ссылки на предыдущую ячейку в оптимальной трансдукции, приводящей в ячейку backtraces[i][j] """ m, n = len(first), len(second) # если threshold=None, то в качестве порога берётся удвоенная стоимость # трансдукции, отображающей символы на одинаковых позициях друг в друга if threshold is None: threshold = 0.0 for a, b in zip(first, second): threshold += self.get_operation_cost(a, b) if m > n: for a in first[n: ]: threshold += self.get_operation_cost(a, '') elif m < n: for b in second[m: ]: threshold += self.get_operation_cost('', b) threshold *= 2 # инициализация возвращаемых массивов costs = np.zeros(shape=(m + 1, n + 1), dtype=np.float64) costs[:] = np.inf backtraces = [None] * (m + 1) for i in range(m + 1): backtraces[i] = [[] for j in range(n + 1)] costs[0][0] = 0.0 for i in range(m + 1): for i_right in range(i, min(i + self.max_up_length, m) + 1): up = first[i: i_right] max_low_length = self.max_low_lengths_by_up.get(up, -1) if max_low_length == -1: # no up key in transduction continue up_costs = self.operation_costs[up] for j in range(n + 1): if costs[i][j] > threshold: continue if len(backtraces[i][j]) == 0 and i + j > 0: continue # не нашлось обратных ссылок for j_right in range((j if i_right > i else j + 1), min(j + max_low_length, n) + 1): low = second[j: j_right] curr_cost = up_costs.get(low, np.inf) old_cost = costs[i_right][j_right] new_cost = costs[i][j] + curr_cost if new_cost > threshold: continue if add_pred(new_cost, old_cost): if clear_pred(new_cost, old_cost): backtraces[i_right][j_right] = [] costs[i_right][j_right] = update_func(new_cost, old_cost) backtraces[i_right][j_right].append((i, j)) return costs, backtraces
Dynamically fills the costs table of transduction costs; costs[i][j] is the minimal cost of a transduction that maps first[:i] to second[:j]. Arguments: ---------- first, second : string The upper and lower elements of the transduction update_func : callable, float*float -> float update_func(x, y) returns the new value for a cell of the costs table when the old value is y and the candidate new value is x; here update_func = min everywhere add_pred : callable : float*float -> bool add_pred(x, y) returns whether a new element p with cost x is added to the cell backtraces[i][j], depending on the value costs[i][j]=y and the current cost x clear_pred : callable : float*float -> bool clear_pred(x, y) returns whether the cell backtraces[i][j] is cleared before adding an element p with cost x, depending on the value costs[i][j]=y Returns: ----------- costs : array, dtype=float, shape=(len(first)+1, len(second)+1) array whose cell with indices i, j holds the minimal cost of a transduction that maps first[:i] into second[:j] backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1) array whose cell with indices i, j holds back references to the previous cell of an optimal transduction leading into the cell backtraces[i][j]
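For orientation, the classical unit-cost edit distance follows the same row-by-row fill pattern; the sketch below is the generic algorithm, not the weighted transduction table with backtraces built above.

import numpy as np

def edit_distance(first, second):
    """Classical Levenshtein distance with unit insert/delete/replace costs."""
    m, n = len(first), len(second)
    costs = np.zeros((m + 1, n + 1))
    costs[:, 0] = np.arange(m + 1)   # deleting i characters costs i
    costs[0, :] = np.arange(n + 1)   # inserting j characters costs j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            subst = 0 if first[i - 1] == second[j - 1] else 1
            costs[i, j] = min(costs[i - 1, j] + 1,          # delete first[i-1]
                              costs[i, j - 1] + 1,          # insert second[j-1]
                              costs[i - 1, j - 1] + subst)  # substitute or keep
    return costs[m, n]

print(edit_distance('kitten', 'sitting'))  # 3.0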
def helioX(self,*args,**kwargs): """ NAME: helioX PURPOSE: return Heliocentric Galactic rectangular x-coordinate (aka "X") INPUT: t - (optional) time at which to get X (can be Quantity) obs=[X,Y,Z] - (optional) position of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: helioX(t) in kpc HISTORY: 2011-02-24 - Written - Bovy (NYU) """ out= self._orb.helioX(*args,**kwargs) if len(out) == 1: return out[0] else: return out
NAME: helioX PURPOSE: return Heliocentric Galactic rectangular x-coordinate (aka "X") INPUT: t - (optional) time at which to get X (can be Quantity) obs=[X,Y,Z] - (optional) position of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: helioX(t) in kpc HISTORY: 2011-02-24 - Written - Bovy (NYU)
def processors(self): """The list of all processors (preprocessors, compilers, postprocessors) used to build asset. """ return self.preprocessors + list(reversed(self.compilers)) + self.postprocessors
The list of all processors (preprocessors, compilers, postprocessors) used to build asset.
def decode(string, base): """ Given a string (string) and a numeric base (base), decode the string into an integer. Returns the integer """ base = int(base) code_string = get_code_string(base) result = 0 if base == 16: string = string.lower() while len(string) > 0: result *= base result += code_string.find(string[0]) string = string[1:] return result
Given a string (string) and a numeric base (base), decode the string into an integer. Returns the integer
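A self-contained sketch of the same positional decoding, assuming a plain hexadecimal code string since get_code_string() is defined elsewhere.

# Decode a hexadecimal string by repeated multiply-and-add over its digits.
def decode_hex(string):
    code_string = '0123456789abcdef'
    result = 0
    for ch in string.lower():
        result = result * 16 + code_string.find(ch)
    return result

print(decode_hex('FF'))    # 255
print(decode_hex('1a2b'))  # 6699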
def get_github_login(self, user, rol, commit_hash, repo): """ rol: author or committer """ login = None try: login = self.github_logins[user] except KeyError: # Get the login from github API GITHUB_API_URL = "https://api.github.com" commit_url = GITHUB_API_URL + "/repos/%s/commits/%s" % (repo, commit_hash) headers = {'Authorization': 'token ' + self.github_token} r = self.requests.get(commit_url, headers=headers) try: r.raise_for_status() except requests.exceptions.ConnectionError as ex: # Connection error logger.error("Can't get github login for %s in %s because a connection error ", repo, commit_hash) return login self.rate_limit = int(r.headers['X-RateLimit-Remaining']) self.rate_limit_reset_ts = int(r.headers['X-RateLimit-Reset']) logger.debug("Rate limit pending: %s", self.rate_limit) if self.rate_limit <= self.min_rate_to_sleep: seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1 if seconds_to_reset < 0: seconds_to_reset = 0 cause = "GitHub rate limit exhausted." logger.info("%s Waiting %i secs for rate limit reset.", cause, seconds_to_reset) time.sleep(seconds_to_reset) # Retry once we have rate limit r = self.requests.get(commit_url, headers=headers) try: r.raise_for_status() except requests.exceptions.HTTPError as ex: # commit not found probably or rate limit exhausted logger.error("Can't find commit %s %s", commit_url, ex) return login commit_json = r.json() author_login = None if 'author' in commit_json and commit_json['author']: author_login = commit_json['author']['login'] else: self.github_logins_author_not_found += 1 user_login = None if 'committer' in commit_json and commit_json['committer']: user_login = commit_json['committer']['login'] else: self.github_logins_committer_not_found += 1 if rol == "author": login = author_login elif rol == "committer": login = user_login else: logger.error("Wrong rol: %s" % (rol)) raise RuntimeError self.github_logins[user] = login logger.debug("%s is %s in github (not found %i authors %i committers )", user, login, self.github_logins_author_not_found, self.github_logins_committer_not_found) return login
rol: author or committer
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_admin_status(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_admin_status = ET.SubElement(fcoe_intf_list, "fcoe-intf-admin-status") fcoe_intf_admin_status.text = kwargs.pop('fcoe_intf_admin_status') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def execute(mp): """ Example process for testing. Inputs: ------- file1 raster file Parameters: ----------- Output: ------- np.ndarray """ # Reading and writing data works like this: with mp.open("file1", resampling="bilinear") as raster_file: if raster_file.is_empty(): return "empty" # This assures a transparent tile instead of a pink error tile # is returned when using mapchete serve. dem = raster_file.read() return dem
Example process for testing. Inputs: ------- file1 raster file Parameters: ----------- Output: ------- np.ndarray
def hide_routemap_holder_route_map_content_match_metric_metric_rmm(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") match = ET.SubElement(content, "match") metric = ET.SubElement(match, "metric") metric_rmm = ET.SubElement(metric, "metric-rmm") metric_rmm.text = kwargs.pop('metric_rmm') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def create_from_tuples(self, tuples, **args): """ Creates from a list of (subj,subj_name,obj) tuples """ amap = {} subject_label_map = {} for a in tuples: subj = a[0] subject_label_map[subj] = a[1] if subj not in amap: amap[subj] = [] amap[subj].append(a[2]) aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args) return aset
Creates from a list of (subj,subj_name,obj) tuples
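The grouping step on its own, with made-up identifiers: build the subject-to-objects map and the subject label map from (subj, subj_name, obj) tuples.

# Sketch of the grouping performed before constructing the association set.
tuples = [('GO:1', 'gene A', 'term X'),
          ('GO:1', 'gene A', 'term Y'),
          ('GO:2', 'gene B', 'term X')]

amap = {}
subject_label_map = {}
for subj, subj_name, obj in tuples:
    subject_label_map[subj] = subj_name
    amap.setdefault(subj, []).append(obj)

print(amap)               # {'GO:1': ['term X', 'term Y'], 'GO:2': ['term X']}
print(subject_label_map)  # {'GO:1': 'gene A', 'GO:2': 'gene B'}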
def prepare_queues(queues, lock): """Replaces queue._put() method in order to notify the waiting Condition.""" for queue in queues: queue._pebble_lock = lock with queue.mutex: queue._pebble_old_method = queue._put queue._put = MethodType(new_method, queue)
Replaces queue._put() method in order to notify the waiting Condition.
def _BuildMessageFromTypeName(type_name, descriptor_pool): """Returns a protobuf message instance. Args: type_name: Fully-qualified protobuf message type name string. descriptor_pool: DescriptorPool instance. Returns: A Message instance of type matching type_name, or None if the a Descriptor wasn't found matching type_name. """ # pylint: disable=g-import-not-at-top from google.protobuf import symbol_database database = symbol_database.Default() try: message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) except KeyError: return None message_type = database.GetPrototype(message_descriptor) return message_type()
Returns a protobuf message instance. Args: type_name: Fully-qualified protobuf message type name string. descriptor_pool: DescriptorPool instance. Returns: A Message instance of type matching type_name, or None if no Descriptor matching type_name was found.
def add_arguments(self, parser): """Command line arguments for Django 1.8+""" # Add the underlying test command arguments first test_command = TestCommand() test_command.add_arguments(parser) for option in OPTIONS: parser.add_argument(*option[0], **option[1])
Command line arguments for Django 1.8+
def load(self): """Load a file in text mode""" self.meta.resolved_path = self.find_data(self.meta.path) if not self.meta.resolved_path: raise ImproperlyConfigured("Data file '{}' not found".format(self.meta.path)) print("Loading:", self.meta.path) with open(self.meta.resolved_path, 'r') as fd: return fd.read()
Load a file in text mode
def fit(self, X, y, groups=None, **fit_params): # type: (...) -> PermutationImportance """Compute ``feature_importances_`` attribute and optionally fit the base estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. **fit_params : Other estimator specific parameters Returns ------- self : object Returns self. """ self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) if pandas_available and isinstance(X, pd.DataFrame): self.scorer_ = self._wrap_scorer(self.scorer_, X.columns) if self.cv != "prefit" and self.refit: self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **fit_params) X = check_array(X) if self.cv not in (None, "prefit"): si = self._cv_scores_importances(X, y, groups=groups, **fit_params) else: si = self._non_cv_scores_importances(X, y) scores, results = si self.scores_ = np.array(scores) self.results_ = results self.feature_importances_ = np.mean(results, axis=0) self.feature_importances_std_ = np.std(results, axis=0) return self
Compute ``feature_importances_`` attribute and optionally fit the base estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. **fit_params : Other estimator specific parameters Returns ------- self : object Returns self.
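A bare-bones sketch of the permutation-importance idea itself, independent of the wrapper class above: permute one column at a time and record the drop in score. The iris data and random forest are arbitrary choices for the example.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

# Fit a model, then measure how much the score falls when each feature's
# values are shuffled, breaking its relationship with the target.
X, y = load_iris(return_X_y=True)
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
baseline = model.score(X, y)

rng = np.random.RandomState(0)
importances = []
for col in range(X.shape[1]):
    X_perm = X.copy()
    X_perm[:, col] = rng.permutation(X_perm[:, col])
    importances.append(baseline - model.score(X_perm, y))

print(np.round(importances, 3))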