Columns: code — string, lengths 75 to 104k characters; docstring — string, lengths 1 to 46.9k characters.
def generate(self): """ Generates the report """ self._setup() header_html = self._generate_header() footer_html = self._generate_footer() results_topbar_html = self._generate_topbar("results") summary_topbar_html = self._generate_topbar("summary") logs_topbar_html = self._generate_topbar("logs") diff_topbar_html = self._generate_topbar("diff") summary_body_html = self._generate_summary_body() diff_body_html = self._generate_diff_body() summary_html = header_html + summary_topbar_html + summary_body_html + footer_html diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html Reporter._make_file(summary_html, self.report_info.home_page) Reporter._make_file(diff_html,self.report_info.diff_page) log_body_html = self._generate_log_body() log_html = header_html + logs_topbar_html + log_body_html+footer_html Reporter._make_file(log_html, self.report_info.log_page) for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) config_body_html = self._generate_config_body(config_name) config_html = header_html + results_topbar_html + config_body_html + footer_html config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx) Reporter._make_file(config_html, config_file) for test_name in self.data_source.get_test_names(config_name): test_body_html = self._generate_test_body(config_name, test_name) test_html = header_html + results_topbar_html + test_body_html + footer_html test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx) Reporter._make_file(test_html, test_file)
Generates the report
def update(self, validate=False): """ Update the DB instance's status information by making a call to fetch the current instance attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the instance the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """ rs = self.connection.get_all_dbinstances(self.id) if len(rs) > 0: for i in rs: if i.id == self.id: self.__dict__.update(i.__dict__) elif validate: raise ValueError('%s is not a valid Instance ID' % self.id) return self.status
Update the DB instance's status information by making a call to fetch the current instance attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the instance the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2.
def add_account_alias(self, account, alias): """ :param account: an account object to be used as a selector :param alias: email alias address :returns: None (the API itself returns nothing) """ self.request('AddAccountAlias', { 'id': self._get_or_fetch_id(account, self.get_account), 'alias': alias, })
:param account: an account object to be used as a selector :param alias: email alias address :returns: None (the API itself returns nothing)
def get_api_name(self, func): """e.g. Convert 'do_work' to 'Do Work'""" words = func.__name__.split('_') words = [w.capitalize() for w in words] return ' '.join(words)
e.g. Convert 'do_work' to 'Do Work'
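A minimal usage sketch for the name-formatting helper above; the class that hosts get_api_name is not shown in this row, so a small stand-in class is assumed here.

def do_work():
    pass

class _Host:
    # Stand-in for the (unshown) class that defines get_api_name.
    def get_api_name(self, func):
        words = func.__name__.split('_')
        return ' '.join(w.capitalize() for w in words)

print(_Host().get_api_name(do_work))   # -> 'Do Work'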
def delete(group_id): """Delete group.""" group = Group.query.get_or_404(group_id) if group.can_edit(current_user): try: group.delete() except Exception as e: flash(str(e), "error") return redirect(url_for(".index")) flash(_('Successfully removed group "%(group_name)s"', group_name=group.name), 'success') return redirect(url_for(".index")) flash( _( 'You cannot delete the group %(group_name)s', group_name=group.name ), 'error' ) return redirect(url_for(".index"))
Delete group.
def remote_property(name, get_command, set_command, field_name, doc=None): """Property decorator that facilitates writing properties for values from a remote device. Arguments: name: The field name to use on the local object to store the cached property. get_command: A function that returns the remote value of the property. set_command: A function that accepts a new value for the property and sets it remotely. field_name: The name of the field to retrieve from the response message to get operations. """ def getter(self): try: return getattr(self, name) except AttributeError: value = getattr(self.sendCommand(get_command()), field_name) setattr(self, name, value) return value def setter(self, value): setattr(self, name, value) self.sendCommand(set_command(value)) return property(getter, setter, doc=doc)
Property decorator that facilitates writing properties for values from a remote device. Arguments: name: The field name to use on the local object to store the cached property. get_command: A function that returns the remote value of the property. set_command: A function that accepts a new value for the property and sets it remotely. field_name: The name of the field to retrieve from the response message to get operations.
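A hedged usage sketch for remote_property: the Device class, its sendCommand transport, and the get/set command constructors below are invented stand-ins for illustration; only the decorator itself comes from the row above.

class _Reply:
    # Fake response message carrying the requested field.
    def __init__(self, volume):
        self.volume = volume

class Device:
    def __init__(self):
        self._remote_volume = 3                  # pretend device-side state

    def sendCommand(self, command):
        # Fake transport: interprets ('get',) and ('set', value) tuples.
        if command[0] == 'set':
            self._remote_volume = command[1]
        return _Reply(self._remote_volume)

    volume = remote_property(
        '_cached_volume',                        # local cache attribute
        lambda: ('get',),                        # get_command
        lambda value: ('set', value),            # set_command
        'volume',                                # field_name on the response
        doc="Remotely backed, locally cached volume setting.")

d = Device()
print(d.volume)    # first read fetches from the "device" and caches -> 3
d.volume = 7       # setter updates the cache and sends the set command
print(d.volume)    # -> 7, served from the cache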
def _get_host_details(self): """Get the system details.""" # Assuming only one system present as part of collection, # as we are dealing with iLO's here. status, headers, system = self._rest_get('/rest/v1/Systems/1') if status < 300: stype = self._get_type(system) if stype not in ['ComputerSystem.0', 'ComputerSystem.1']: msg = "%s is not a valid system type " % stype raise exception.IloError(msg) else: msg = self._get_extended_error(system) raise exception.IloError(msg) return system
Get the system details.
def __push_symbol(self, symbol): '''Ask the websocket for a symbol push. Gets instrument, orderBook, quote, and trade''' self.__send_command("getSymbol", symbol) while not {'instrument', 'trade', 'orderBook25'} <= set(self.data): sleep(0.1)
Ask the websocket for a symbol push. Gets instrument, orderBook, quote, and trade
def astype(self, col_dtypes, **kwargs): """Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes. """ # Group indices to update by dtype for less map operations dtype_indices = {} columns = col_dtypes.keys() numeric_indices = list(self.columns.get_indexer_for(columns)) # Create Series for the updated dtypes new_dtypes = self.dtypes.copy() for i, column in enumerate(columns): dtype = col_dtypes[column] if ( not isinstance(dtype, type(self.dtypes[column])) or dtype != self.dtypes[column] ): # Only add dtype only if different if dtype in dtype_indices.keys(): dtype_indices[dtype].append(numeric_indices[i]) else: dtype_indices[dtype] = [numeric_indices[i]] # Update the new dtype series to the proper pandas dtype try: new_dtype = np.dtype(dtype) except TypeError: new_dtype = dtype if dtype != np.int32 and new_dtype == np.int32: new_dtype = np.dtype("int64") elif dtype != np.float32 and new_dtype == np.float32: new_dtype = np.dtype("float64") new_dtypes[column] = new_dtype # Update partitions for each dtype that is updated new_data = self.data for dtype in dtype_indices.keys(): def astype(df, internal_indices=[]): block_dtypes = {} for ind in internal_indices: block_dtypes[df.columns[ind]] = dtype return df.astype(block_dtypes) new_data = new_data.apply_func_to_select_indices( 0, astype, dtype_indices[dtype], keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes.
def raise_for_response(self, responses): """ Constructs appropriate exception from list of responses and raises it. """ exception_messages = [self.client.format_exception_message(response) for response in responses] if len(exception_messages) == 1: message = exception_messages[0] else: message = "[%s]" % ", ".join(exception_messages) raise PostmarkerException(message)
Constructs appropriate exception from list of responses and raises it.
def retry(self): """No connection to device, retry connection after 15 seconds.""" self.stream = None self.config.loop.call_later(RETRY_TIMER, self.start) _LOGGER.debug('Reconnecting to %s', self.config.host)
No connection to device, retry connection after 15 seconds.
def _find_binary(binary=None): """Find the absolute path to the GnuPG binary. Also run checks that the binary is not a symlink, and check that our process real uid has exec permissions. :param str binary: The path to the GnuPG binary. :raises: :exc:`~exceptions.RuntimeError` if it appears that GnuPG is not installed. :rtype: str :returns: The absolute path to the GnuPG binary to use, if no exceptions occur. """ found = None if binary is not None: if os.path.isabs(binary) and os.path.isfile(binary): return binary if not os.path.isabs(binary): try: found = _which(binary) log.debug("Found potential binary paths: %s" % '\n'.join([path for path in found])) found = found[0] except IndexError as ie: log.info("Could not determine absolute path of binary: '%s'" % binary) elif os.access(binary, os.X_OK): found = binary if found is None: try: found = _which('gpg', abspath_only=True, disallow_symlinks=True)[0] except IndexError as ie: log.error("Could not find binary for 'gpg'.") try: found = _which('gpg2')[0] except IndexError as ie: log.error("Could not find binary for 'gpg2'.") if found is None: raise RuntimeError("GnuPG is not installed!") return found
Find the absolute path to the GnuPG binary. Also run checks that the binary is not a symlink, and check that our process real uid has exec permissions. :param str binary: The path to the GnuPG binary. :raises: :exc:`~exceptions.RuntimeError` if it appears that GnuPG is not installed. :rtype: str :returns: The absolute path to the GnuPG binary to use, if no exceptions occur.
def open_submission(self): """ Open the full submission and comment tree for the selected comment. """ url = self.get_selected_item().get('submission_permalink') if url: self.selected_page = self.open_submission_page(url)
Open the full submission and comment tree for the selected comment.
def add_data(self, minimum_address, maximum_address, data, overwrite): """Add given data to this segment. The added data must be adjacent to the current segment data, otherwise an exception is thrown. """ if minimum_address == self.maximum_address: self.maximum_address = maximum_address self.data += data elif maximum_address == self.minimum_address: self.minimum_address = minimum_address self.data = data + self.data elif (overwrite and minimum_address < self.maximum_address and maximum_address > self.minimum_address): self_data_offset = minimum_address - self.minimum_address # Prepend data. if self_data_offset < 0: self_data_offset *= -1 self.data = data[:self_data_offset] + self.data del data[:self_data_offset] self.minimum_address = minimum_address # Overwrite overlapping part. self_data_left = len(self.data) - self_data_offset if len(data) <= self_data_left: self.data[self_data_offset:self_data_offset + len(data)] = data data = bytearray() else: self.data[self_data_offset:] = data[:self_data_left] data = data[self_data_left:] # Append data. if len(data) > 0: self.data += data self.maximum_address = maximum_address else: raise AddDataError( 'data added to a segment must be adjacent to or overlapping ' 'with the original segment data')
Add given data to this segment. The added data must be adjacent to the current segment data, otherwise an exception is thrown.
def _Authenticate(self): """Authenticates the user. The authentication process works as follows: 1) We get a username and password from the user 2) We use ClientLogin to obtain an AUTH token for the user (see http://code.google.com/apis/accounts/AuthForInstalledApps.html). 3) We pass the auth token to /_ah/login on the server to obtain an authentication cookie. If login was successful, it tries to redirect us to the URL we provided. If we attempt to access the upload API without first obtaining an authentication cookie, it returns a 401 response (or a 302) and directs us to authenticate ourselves with ClientLogin. """ for i in range(3): credentials = self.auth_function() try: auth_token = self._GetAuthToken(credentials[0], credentials[1]) except ClientLoginError, e: if e.msg == "BadAuthentication": print >>sys.stderr, "Invalid username or password." continue if e.msg == "CaptchaRequired": print >>sys.stderr, ( "Please go to\n" "https://www.google.com/accounts/DisplayUnlockCaptcha\n" "and verify you are a human. Then try again.") break if e.msg == "NotVerified": print >>sys.stderr, "Account not verified." break if e.msg == "TermsNotAgreed": print >>sys.stderr, "User has not agreed to TOS." break if e.msg == "AccountDeleted": print >>sys.stderr, "The user account has been deleted." break if e.msg == "AccountDisabled": print >>sys.stderr, "The user account has been disabled." break if e.msg == "ServiceDisabled": print >>sys.stderr, "The user's access to the service has been disabled." break if e.msg == "ServiceUnavailable": print >>sys.stderr, "The service is not available; try again later." break raise self._GetAuthCookie(auth_token) return
Authenticates the user. The authentication process works as follows: 1) We get a username and password from the user 2) We use ClientLogin to obtain an AUTH token for the user (see http://code.google.com/apis/accounts/AuthForInstalledApps.html). 3) We pass the auth token to /_ah/login on the server to obtain an authentication cookie. If login was successful, it tries to redirect us to the URL we provided. If we attempt to access the upload API without first obtaining an authentication cookie, it returns a 401 response (or a 302) and directs us to authenticate ourselves with ClientLogin.
def CopyDirectory(source_dir, target_dir, override=False): ''' Recursively copy a directory tree. :param unicode source_dir: Where files will come from :param unicode target_dir: Where files will go to :param bool override: If True and target_dir already exists, it will be deleted before copying. :raises NotImplementedForRemotePathError: If trying to copy to/from remote directories ''' _AssertIsLocal(source_dir) _AssertIsLocal(target_dir) if override and IsDir(target_dir): DeleteDirectory(target_dir, skip_on_error=False) import shutil shutil.copytree(source_dir, target_dir)
Recursively copy a directory tree. :param unicode source_dir: Where files will come from :param unicode target_dir: Where files will go to :param bool override: If True and target_dir already exists, it will be deleted before copying. :raises NotImplementedForRemotePathError: If trying to copy to/from remote directories
def add_predicate(self, pred_obj): """ Adds a predicate object to the layer @type pred_obj: L{Cpredicate} @param pred_obj: the predicate object """ pred_id = pred_obj.get_id() if not pred_id in self.idx: pred_node = pred_obj.get_node() self.node.append(pred_node) self.idx[pred_id] = pred_node else: #FIXME we want new id rather than ignoring the element print('Error: trying to add new element, but id has already been given')
Adds a predicate object to the layer @type pred_obj: L{Cpredicate} @param pred_obj: the predicate object
def plotMultipleInferenceRun(stats, fields, basename, plotDir="plots"): """ Plots individual inference runs. """ if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y'] # plot request stats for i, field in enumerate(fields): fieldKey = field[0] + " C0" trace = [] for s in stats: trace += s[fieldKey] plt.plot(trace, label=field[1], color=colorList[i]) # format plt.legend(loc="upper right") plt.xlabel("Input number") plt.xticks(range(0, len(stats)*stats[0]["numSteps"]+1,5)) plt.ylabel("Number of cells") plt.ylim(-5, 55) plt.title("Inferring combined sensorimotor and temporal sequence stream") # save relPath = "{}_exp_combined.pdf".format(basename) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close()
Plots individual inference runs.
def init(celf, *, loop = None, unregister = None, message = None) : "for consistency with other classes that don’t want caller to instantiate directly." return \ celf \ ( loop = loop, unregister = unregister, message = message, )
for consistency with other classes that don’t want caller to instantiate directly.
def LE32(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False): '''32-bit field, Little endian encoded''' return UInt32(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_LE, fuzzable=fuzzable, name=name, full_range=full_range)
32-bit field, Little endian encoded
def make_client(zhmc, userid=None, password=None): """ Create a `Session` object for the specified HMC and log that on. Create a `Client` object using that `Session` object, and return it. If no userid and password are specified, and if no previous call to this method was made, userid and password are interactively inquired. Userid and password are saved in module-global variables for future calls to this method. """ global USERID, PASSWORD # pylint: disable=global-statement USERID = userid or USERID or \ six.input('Enter userid for HMC {}: '.format(zhmc)) PASSWORD = password or PASSWORD or \ getpass.getpass('Enter password for {}: '.format(USERID)) session = zhmcclient.Session(zhmc, USERID, PASSWORD) session.logon() client = zhmcclient.Client(session) print('Established logged-on session with HMC {} using userid {}'. format(zhmc, USERID)) return client
Create a `Session` object for the specified HMC and log that on. Create a `Client` object using that `Session` object, and return it. If no userid and password are specified, and if no previous call to this method was made, userid and password are interactively inquired. Userid and password are saved in module-global variables for future calls to this method.
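Usage sketch for the helper above; the HMC hostnames and credentials are placeholders. The second call illustrates the module-level caching of USERID/PASSWORD described in the docstring.

client = make_client('hmc1.example.com', userid='ensadmin', password='secret')
# Later calls can omit the credentials; the cached USERID/PASSWORD are reused
# (only the prompts are skipped -- a new session is still logged on).
client2 = make_client('hmc2.example.com')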
def _buildTemplates(self): """ do all the things necessary to build the viz should be adapted to work for single-file viz, or multi-files etc. :param output_path: :return: """ # in this case we only have one contents = self._renderTemplate(self.template_name, extraContext=None) # the main url used for opening viz f = self.main_file_name main_url = self._save2File(contents, f, self.output_path) return main_url
Do all the things necessary to build the viz. Should be adapted to work for single-file viz, multi-file viz, etc. :param output_path: :return:
def loadModel(self, model_file): """load q table from model_file""" with open(model_file) as f: self.q_table = json.load(f)
load q table from model_file
def showGrid( self ): """ Returns whether or not this delegate should draw its grid lines. :return <bool> """ delegate = self.itemDelegate() if ( isinstance(delegate, XTreeWidgetDelegate) ): return delegate.showGrid() return False
Returns whether or not this delegate should draw its grid lines. :return <bool>
def copy(self): """ Make deep copy of this KeyBundle :return: The copy """ kb = KeyBundle() kb._keys = self._keys[:] kb.cache_time = self.cache_time kb.verify_ssl = self.verify_ssl if self.source: kb.source = self.source kb.fileformat = self.fileformat kb.keytype = self.keytype kb.keyusage = self.keyusage kb.remote = self.remote return kb
Make deep copy of this KeyBundle :return: The copy
def change_axis(self, axis_num, channel_name): """ TODO: refactor that and set_axes what to do with ax? axis_num: int axis number channel_name: str new channel to plot on that axis """ current_channels = list(self.current_channels) if len(current_channels) == 1: if axis_num == 0: new_channels = channel_name, else: new_channels = current_channels[0], channel_name else: new_channels = list(current_channels) new_channels[axis_num] = channel_name self.set_axes(new_channels, self.ax)
TODO: refactor this together with set_axes; decide what to do with ax. axis_num: int axis number channel_name: str new channel to plot on that axis
def callback(self, event): """ Callback function to spawn a mini-browser when a feature is clicked. """ artist = event.artist ind = artist.ind limit = 5 browser = True if len(event.ind) > limit: print "more than %s genes selected; not spawning browsers" % limit browser = False for i in event.ind: feature = artist.features[ind[i]] print feature, if browser: self.minibrowser.plot(feature)
Callback function to spawn a mini-browser when a feature is clicked.
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids, cores_inner, cores_outer): """ Use a C loop rather than a Python loop - in some cases this will be fast. :type template_array: dict :param template_array: :type stream_array: dict :param stream_array: :type pad_array: dict :param pad_array: :type seed_ids: list :param seed_ids: rtype: np.ndarray, list :return: 3D Array of cross-correlations and list of used channels. """ utilslib = _load_cdll('libutils') utilslib.multi_normxcorr_fftw.argtypes = [ np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, ctypes.c_long, ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS')), np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS')), ctypes.c_int, ctypes.c_int, np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS'))] utilslib.multi_normxcorr_fftw.restype = ctypes.c_int ''' Arguments are: templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...]) number of templates template length number of channels image (stacked [ch_1, ch_2, ..., ch_n]) image length cross-correlations (stacked as per image) fft-length used channels (stacked as per templates) pad array (stacked as per templates) ''' # pre processing used_chans = [] template_len = template_array[seed_ids[0]].shape[1] for seed_id in seed_ids: used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1)) template_array[seed_id] = ( (template_array[seed_id] - template_array[seed_id].mean(axis=-1, keepdims=True)) / ( template_array[seed_id].std(axis=-1, keepdims=True) * template_len)) template_array[seed_id] = np.nan_to_num(template_array[seed_id]) n_channels = len(seed_ids) n_templates = template_array[seed_ids[0]].shape[0] image_len = stream_array[seed_ids[0]].shape[0] fft_len = next_fast_len(template_len + image_len - 1) template_array = np.ascontiguousarray([template_array[x] for x in seed_ids], dtype=np.float32) for x in seed_ids: # Check that stream is non-zero and above variance threshold if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8: # Apply gain stream_array *= 1e8 warnings.warn("Low variance found for {0}, applying gain " "to stabilise correlations".format(x)) stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids], dtype=np.float32) cccs = np.zeros((n_templates, image_len - template_len + 1), np.float32) used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc) pad_array_np = np.ascontiguousarray([pad_array[seed_id] for seed_id in seed_ids], dtype=np.intc) variance_warnings = np.ascontiguousarray( np.zeros(n_channels), dtype=np.intc) # call C function ret = utilslib.multi_normxcorr_fftw( template_array, n_templates, template_len, n_channels, stream_array, image_len, cccs, fft_len, used_chans_np, pad_array_np, cores_outer, cores_inner, variance_warnings) if ret < 0: raise MemoryError("Memory allocation failed in correlation C-code") elif ret not in [0, 999]: print('Error in C code (possible normalisation error)') print('Maximum cccs %f at %s' % (cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape))) print('Minimum cccs %f at %s' % (cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape))) raise CorrelationError("Internal correlation error") elif ret == 999: warnings.warn("Some correlations not computed, are there " "zeros in data? If not, consider increasing gain.") for i, variance_warning in enumerate(variance_warnings): if variance_warning and variance_warning > template_len: warnings.warn("Low variance found in {0} places for {1}," " check result.".format(variance_warning, seed_ids[i])) return cccs, used_chans
Use a C loop rather than a Python loop - in some cases this will be fast. :type template_array: dict :param template_array: :type stream_array: dict :param stream_array: :type pad_array: dict :param pad_array: :type seed_ids: list :param seed_ids: rtype: np.ndarray, list :return: 3D Array of cross-correlations and list of used channels.
def get_routes( feed: "Feed", date: Optional[str] = None, time: Optional[str] = None ) -> DataFrame: """ Return a subset of ``feed.routes`` Parameters ----------- feed : Feed date : string YYYYMMDD date string restricting routes to only those active on the date time : string HH:MM:SS time string, possibly with HH > 23, restricting routes to only those active during the time Returns ------- DataFrame A subset of ``feed.routes`` Notes ----- Assume the following feed attributes are not ``None``: - ``feed.routes`` - Those used in :func:`.trips.get_trips`. """ if date is None: return feed.routes.copy() trips = feed.get_trips(date, time) R = trips["route_id"].unique() return feed.routes[feed.routes["route_id"].isin(R)]
Return a subset of ``feed.routes`` Parameters ----------- feed : Feed date : string YYYYMMDD date string restricting routes to only those active on the date time : string HH:MM:SS time string, possibly with HH > 23, restricting routes to only those active during the time Returns ------- DataFrame A subset of ``feed.routes`` Notes ----- Assume the following feed attributes are not ``None``: - ``feed.routes`` - Those used in :func:`.trips.get_trips`.
def path_exists_glob(path): ''' Tests to see if path after expansion is a valid path (file or directory). Expansion allows usage of ? * and character ranges []. Tilde expansion is not supported. Returns True/False. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' file.path_exists_glob /etc/pam*/pass* ''' return True if glob.glob(os.path.expanduser(path)) else False
Tests to see if path after expansion is a valid path (file or directory). Expansion allows usage of ? * and character ranges []. Tilde expansion is not supported. Returns True/False. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' file.path_exists_glob /etc/pam*/pass*
def mix(color1, color2, pos=0.5): """ Return the mix of two colors at a state of :pos: Returns color1 * pos + color2 * (1 - pos) """ opp_pos = 1 - pos red = color1[0] * pos + color2[0] * opp_pos green = color1[1] * pos + color2[1] * opp_pos blue = color1[2] * pos + color2[2] * opp_pos return int(red), int(green), int(blue)
Return the mix of two colors at a state of :pos: Returns color1 * pos + color2 * (1 - pos)
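Worked example for mix, assuming 8-bit RGB tuples: each channel is a weighted average that int() truncates.

red = (255, 0, 0)
blue = (0, 0, 255)
print(mix(red, blue, pos=0.5))   # -> (127, 0, 127): 255*0.5 + 0*0.5 = 127.5, truncated
print(mix(red, blue, pos=1.0))   # -> (255, 0, 0): pos=1.0 returns color1 unchanged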
def rmdir(self, parents=False): """Removes this directory, provided it is empty. Use :func:`~rpaths.Path.rmtree` if it might still contain files. :param parents: If set to True, it will also destroy every empty directory above it until an error is encountered. """ if parents: os.removedirs(self.path) else: os.rmdir(self.path)
Removes this directory, provided it is empty. Use :func:`~rpaths.Path.rmtree` if it might still contain files. :param parents: If set to True, it will also destroy every empty directory above it until an error is encountered.
def add_collaboration(self, collaboration): """Add collaboration. :param collaboration: collaboration for the current document :type collaboration: string """ collaborations = normalize_collaboration(collaboration) for collaboration in collaborations: self._append_to('collaborations', { 'value': collaboration })
Add collaboration. :param collaboration: collaboration for the current document :type collaboration: string
def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation): """ Print ping exit statistics """ end_time = datetime.datetime.now() duration = end_time - start_time duration_sec = float(duration.seconds * 1000) duration_ms = float(duration.microseconds / 1000) duration = duration_sec + duration_ms package_loss = 100 - ((float(count_received) / float(count_sent)) * 100) print(f'\b\b--- {hostname} ping statistics ---') try: print(f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms') except ZeroDivisionError: print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms') print( 'rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms' % ( min_time.seconds*1000 + float(min_time.microseconds)/1000, float(avg_time) / 1000, max_time.seconds*1000 + float(max_time.microseconds)/1000, float(deviation) ) )
Print ping exit statistics
def specificity(result, reference): """ Specificity. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. Returns ------- specificity : float The specificity between two binary datasets, here mostly binary objects in images, which denotes the fraction of correctly returned negatives. The specificity is not symmetric. See also -------- :func:`sensitivity` Notes ----- Not symmetric. The completment of the specificity is :func:`sensitivity`. High recall means that an algorithm returned most of the irrelevant results. References ---------- .. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion """ result = numpy.atleast_1d(result.astype(numpy.bool)) reference = numpy.atleast_1d(reference.astype(numpy.bool)) tn = numpy.count_nonzero(~result & ~reference) fp = numpy.count_nonzero(result & ~reference) try: specificity = tn / float(tn + fp) except ZeroDivisionError: specificity = 0.0 return specificity
Specificity. Parameters ---------- result : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. reference : array_like Input data containing objects. Can be any type but will be converted into binary: background where 0, object everywhere else. Returns ------- specificity : float The specificity between two binary datasets, here mostly binary objects in images, which denotes the fraction of correctly returned negatives. The specificity is not symmetric. See also -------- :func:`sensitivity` Notes ----- Not symmetric. The complement of the specificity is :func:`sensitivity`. High specificity means that an algorithm correctly rejected most of the irrelevant (negative) results. References ---------- .. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
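A tiny worked example, assuming the specificity function above is in scope: one true negative and one false positive give a specificity of 0.5.

import numpy

result    = numpy.array([0, 1, 0, 1])   # what the algorithm returned
reference = numpy.array([0, 0, 1, 1])   # ground truth
# tn = 1 (index 0), fp = 1 (index 1) -> 1 / (1 + 1)
print(specificity(result, reference))   # -> 0.5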
def get_model_paths(model_dir): """Returns all model paths in the model_dir.""" all_models = gfile.Glob(os.path.join(model_dir, '*.meta')) model_filenames = [os.path.basename(m) for m in all_models] model_numbers_names = [ (shipname.detect_model_num(m), shipname.detect_model_name(m)) for m in model_filenames] model_names = sorted(model_numbers_names) return [os.path.join(model_dir, name[1]) for name in model_names]
Returns all model paths in the model_dir.
def pop_marker(self, reset): """ Pop a marker off of the marker stack. If reset is True then the iterator will be returned to the state it was in before the corresponding call to push_marker(). """ marker = self.markers.pop() if reset: # Make the values available to be read again marker.extend(self.look_ahead) self.look_ahead = marker elif self.markers: # Otherwise, reassign the values to the top marker self.markers[-1].extend(marker) else: # If there are not more markers in the stack then discard the values pass
Pop a marker off of the marker stack. If reset is True then the iterator will be returned to the state it was in before the corresponding call to push_marker().
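pop_marker only makes sense next to its push_marker/next counterparts, which this row does not include. Below is a minimal self-contained sketch of such a marker-based backtracking iterator, written to match the behaviour above; everything other than the markers/look_ahead bookkeeping is an assumption.

class PeekableIterator:
    def __init__(self, iterable):
        self.iterator = iter(iterable)
        self.look_ahead = []      # values available to be (re)read
        self.markers = []         # stack of lists recording consumed values

    def push_marker(self):
        self.markers.append([])

    def next(self):
        value = self.look_ahead.pop(0) if self.look_ahead else next(self.iterator)
        if self.markers:
            self.markers[-1].append(value)   # remember it for a possible rewind
        return value

    def pop_marker(self, reset):
        marker = self.markers.pop()
        if reset:
            # Make the values available to be read again.
            marker.extend(self.look_ahead)
            self.look_ahead = marker
        elif self.markers:
            # Otherwise, reassign the values to the enclosing marker.
            self.markers[-1].extend(marker)

it = PeekableIterator('abc')
it.push_marker()
it.next(); it.next()          # consume 'a' and 'b' under the marker
it.pop_marker(reset=True)     # rewind to where the marker was pushed
print(it.next())              # -> 'a' again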
def extract(self, html_text: str, extract_title: bool = False, extract_meta: bool = False, extract_microdata: bool = False, microdata_base_url: str = "", extract_json_ld: bool = False, extract_rdfa: bool = False, rdfa_base_url: str = "") \ -> List[Extraction]: """ Args: html_text (str): input html string to be extracted extract_title (bool): True if string of 'title' tag needs to be extracted, return as { "title": "..." } extract_meta (bool): True if string of 'meta' tags needs to be extracted, return as { "meta": { "author": "...", ...}} extract_microdata (bool): True if microdata needs to be extracted, returns as { "microdata": [...] } microdata_base_url (str): base namespace url for microdata, empty string if no base url is specified extract_json_ld (bool): True if json-ld needs to be extracted, return as { "json-ld": [...] } extract_rdfa (bool): True if rdfs needs to be extracted, returns as { "rdfa": [...] } rdfa_base_url (str): base namespace url for rdfa, empty string if no base url is specified Returns: List[Extraction]: the list of extraction or the empty list if there are no matches. """ res = list() soup = BeautifulSoup(html_text, 'html.parser') if soup.title and extract_title: title = self._wrap_data("title", soup.title.string.encode('utf-8').decode('utf-8')) res.append(title) if soup.title and extract_meta: meta_content = self._wrap_meta_content(soup.find_all("meta")) meta_data = self._wrap_data("meta", meta_content) res.append(meta_data) if extract_microdata: mde = MicrodataExtractor() mde_data = self._wrap_data("microdata", mde.extract(html_text, microdata_base_url)) res.append(mde_data) if extract_json_ld: jslde = JsonLdExtractor() jslde_data = self._wrap_data("json-ld", jslde.extract(html_text)) res.append(jslde_data) if extract_rdfa: rdfae = RDFaExtractor() rdfae_data = self._wrap_data("rdfa", rdfae.extract(html_text, rdfa_base_url)) res.append(rdfae_data) return res
Args: html_text (str): input html string to be extracted extract_title (bool): True if string of 'title' tag needs to be extracted, return as { "title": "..." } extract_meta (bool): True if string of 'meta' tags needs to be extracted, return as { "meta": { "author": "...", ...}} extract_microdata (bool): True if microdata needs to be extracted, returns as { "microdata": [...] } microdata_base_url (str): base namespace url for microdata, empty string if no base url is specified extract_json_ld (bool): True if json-ld needs to be extracted, return as { "json-ld": [...] } extract_rdfa (bool): True if rdfs needs to be extracted, returns as { "rdfa": [...] } rdfa_base_url (str): base namespace url for rdfa, empty string if no base url is specified Returns: List[Extraction]: the list of extraction or the empty list if there are no matches.
def build(port=8000, fixtures=None): """ Builds a server file. 1. Extract mock response details from all valid docstrings in existing views 2. Parse and generate mock values 3. Create a store of all endpoints and data 4. Construct server file """ extractor = Extractor() parser = Parser(extractor.url_details, fixtures) parser.parse() url_details = parser.results _store = get_store(url_details) store = json.dumps(_store) variables = str(Variable('let', 'store', store)) functions = DATA_FINDER + GET_HANDLER + MODIFY_HANDLER + POST_HANDLER endpoints = [] endpoint_uris = [] for u in parser.results: endpoint = Endpoint() if u['method'].lower() in ['get', 'post']: method = u['method'].lower() else: method = 'modify' response = str(ResponseBody(method)) # Check in store if the base url has individual instances u['url'], list_url = clean_url(u['full_url'], _store, u['method'].lower()) if list_url is not None and u['method'].lower() == 'get': list_endpoint = Endpoint() list_endpoint.construct('get', list_url, response) if str(list_endpoint) not in endpoints: endpoints.append(str(list_endpoint)) if list_endpoint.uri not in endpoint_uris: endpoint_uris.append(list_endpoint.uri) if method == 'modify': without_prefix = re.sub(r'\/(\w+)\_\_', '', u['url']) for k, v in _store.items(): if without_prefix in k: options = v.get('options', '{}') options = ast.literal_eval(options) modifiers = [] if options is not None: modifiers = options.get('modifiers', []) if modifiers: for mod in modifiers: if u['method'].lower() == mod: mod_endpoint = Endpoint() uri = without_prefix if v.get('position') is not None and v['position'] == 'url': uri = re.sub(r'\/?\_\_key', '/:id', u['full_url']) mod_endpoint.construct(u['method'].lower(), uri, response) if str(mod_endpoint) not in endpoints: endpoints.append(str(mod_endpoint)) if mod_endpoint.uri not in endpoint_uris: endpoint_uris.append(mod_endpoint.uri) else: endpoint.construct(u['method'], u['url'], response) if str(endpoint) not in endpoints: endpoints.append(str(endpoint)) if endpoint.uri not in endpoint_uris: endpoint_uris.append(endpoint.uri) endpoints = ''.join(endpoints) express = ExpressServer() express.construct(variables, functions, endpoints, port) return express
Builds a server file. 1. Extract mock response details from all valid docstrings in existing views 2. Parse and generate mock values 3. Create a store of all endpoints and data 4. Construct server file
def get_settings(): """ This function returns a dict containing default settings """ s = getattr(settings, 'CLAMAV_UPLOAD', {}) s = { 'CONTENT_TYPE_CHECK_ENABLED': s.get('CONTENT_TYPE_CHECK_ENABLED', False), # LAST_HANDLER is not a user configurable option; we return # it with the settings dict simply because it's convenient. 'LAST_HANDLER': getattr(settings, 'FILE_UPLOAD_HANDLERS')[-1] } return s
This function returns a dict containing default settings
def encode(B): """ Encode data using Hamming(7, 4) code. E.g.: encode([0, 0, 1, 1]) encode([[0, 0, 0, 1], [0, 1, 0, 1]]) :param array B: binary data to encode (must be shaped as (4, ) or (-1, 4)). """ B = array(B) flatten = False if len(B.shape) == 1: flatten = True B = B.reshape(1, -1) if B.shape[1] != data_size: raise ValueError('Data must be shaped as (4, ) or (-1, 4)') C = dot(G, B.T).T % 2 if flatten: C = C.flatten() return C
Encode data using Hamming(7, 4) code. E.g.: encode([0, 0, 1, 1]) encode([[0, 0, 0, 1], [0, 1, 0, 1]]) :param array B: binary data to encode (must be shaped as (4, ) or (-1, 4)).
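encode relies on module-level G and data_size that this row does not show; the sketch below assumes a standard Hamming(7, 4) generator matrix (parity bits interleaved as p1 p2 d1 p3 d2 d3 d4), so the real library's bit layout may differ.

from numpy import array, dot

data_size = 4
G = array([[1, 1, 0, 1],
           [1, 0, 1, 1],
           [1, 0, 0, 0],
           [0, 1, 1, 1],
           [0, 1, 0, 0],
           [0, 0, 1, 0],
           [0, 0, 0, 1]])   # assumed generator matrix

print(encode([0, 0, 1, 1]))       # with this G: [1 0 0 0 0 1 1]
print(encode([[0, 0, 0, 1],
              [0, 1, 0, 1]]))     # shape (2, 7), one 7-bit codeword per row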
def getResponseAction(self, ps, action): '''Returns response WS-Action if available action -- request WS-Action value. ''' opName = self.getOperationName(ps, action) if self.wsAction.has_key(opName) is False: raise WSActionNotSpecified, 'wsAction dictionary missing key(%s)' %opName return self.wsAction[opName]
Returns response WS-Action if available action -- request WS-Action value.
def create_domain(self, service_id, version_number, name, comment=None): """Create a domain for a particular service and version.""" body = self._formdata({ "name": name, "comment": comment, }, FastlyDomain.FIELDS) content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number), method="POST", body=body) return FastlyDomain(self, content)
Create a domain for a particular service and version.
def _sort_shared_logical_disks(logical_disks): """Sort the logical disks based on the following conditions. When the share_physical_disks is True make sure we create the volume which needs more disks first. This avoids the situation of insufficient disks for some logical volume request. For example, - two logical disk with number of disks - LD1(3), LD2(4) - have 4 physical disks In this case, if we consider LD1 first then LD2 will fail since not enough disks available to create LD2. So follow a order for allocation when share_physical_disks is True. Also RAID1 can share only when there is logical volume with only 2 disks. So make sure we create RAID 1 first when share_physical_disks is True. And RAID 1+0 can share only when the logical volume with even number of disks. :param logical_disks: 'logical_disks' to be sorted for shared logical disks. :returns: the logical disks sorted based the above conditions. """ is_shared = (lambda x: True if ('share_physical_disks' in x and x['share_physical_disks']) else False) num_of_disks = (lambda x: x['number_of_physical_disks'] if 'number_of_physical_disks' in x else constants.RAID_LEVEL_MIN_DISKS[x['raid_level']]) # Separate logical disks based on share_physical_disks value. # 'logical_disks_shared' when share_physical_disks is True and # 'logical_disks_nonshared' when share_physical_disks is False logical_disks_shared = [] logical_disks_nonshared = [] for x in logical_disks: target = (logical_disks_shared if is_shared(x) else logical_disks_nonshared) target.append(x) # Separete logical disks with raid 1 from the 'logical_disks_shared' into # 'logical_disks_shared_raid1' and remaining as # 'logical_disks_shared_excl_raid1'. logical_disks_shared_raid1 = [] logical_disks_shared_excl_raid1 = [] for x in logical_disks_shared: target = (logical_disks_shared_raid1 if x['raid_level'] == '1' else logical_disks_shared_excl_raid1) target.append(x) # Sort the 'logical_disks_shared' in reverse order based on # 'number_of_physical_disks' attribute, if provided, otherwise minimum # disks required to create the logical volume. logical_disks_shared = sorted(logical_disks_shared_excl_raid1, reverse=True, key=num_of_disks) # Move RAID 1+0 to first in 'logical_disks_shared' when number of physical # disks needed to create logical volume cannot be shared with odd number of # disks and disks higher than that of RAID 1+0. check = True for x in logical_disks_shared: if x['raid_level'] == "1+0": x_num = num_of_disks(x) for y in logical_disks_shared: if y['raid_level'] != "1+0": y_num = num_of_disks(y) if x_num < y_num: check = (True if y_num % 2 == 0 else False) if check: break if not check: logical_disks_shared.remove(x) logical_disks_shared.insert(0, x) check = True # Final 'logical_disks_sorted' list should have non shared logical disks # first, followed by shared logical disks with RAID 1, and finally by the # shared logical disks sorted based on number of disks and RAID 1+0 # condition. logical_disks_sorted = (logical_disks_nonshared + logical_disks_shared_raid1 + logical_disks_shared) return logical_disks_sorted
Sort the logical disks based on the following conditions. When share_physical_disks is True, make sure we create the volume which needs more disks first. This avoids the situation of insufficient disks for some logical volume request. For example, - two logical disks with number of disks - LD1(3), LD2(4) - have 4 physical disks In this case, if we consider LD1 first then LD2 will fail since there are not enough disks available to create LD2. So follow an order for allocation when share_physical_disks is True. Also, RAID 1 can share only when there is a logical volume with only 2 disks, so make sure we create RAID 1 first when share_physical_disks is True. And RAID 1+0 can share only when the logical volume has an even number of disks. :param logical_disks: 'logical_disks' to be sorted for shared logical disks. :returns: the logical disks sorted based on the above conditions.
def get_metadata(doi): """Returns the metadata of an article given its DOI from CrossRef as a JSON dict""" url = crossref_url + 'works/' + doi res = requests.get(url) if res.status_code != 200: logger.info('Could not get CrossRef metadata for DOI %s, code %d' % (doi, res.status_code)) return None raw_message = res.json() metadata = raw_message.get('message') return metadata
Returns the metadata of an article given its DOI from CrossRef as a JSON dict
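Usage sketch; the DOI is only a placeholder and the field names follow CrossRef's works schema (e.g. 'title' is a list of strings).

metadata = get_metadata('10.1000/xyz123')   # placeholder DOI
if metadata is not None:
    print(metadata.get('title'))            # list of title strings
    print(metadata.get('DOI'))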
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True): """Shifts a frequency domain waveform in time. The waveform is assumed to be sampled at equal frequency intervals. """ if htilde.precision != 'single': raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision") if copy: out = htilde.copy() else: out = htilde kmin = numpy.int32(kmin) kmax = numpy.int32(len(htilde)) nb = int(numpy.ceil(kmax / nt_float)) if nb > 1024: raise ValueError("More than 1024 blocks not supported yet") phi = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f) fseries_ts_fn.prepared_call((nb, 1), (nt, 1, 1), out.data.gpudata, phi, kmin, kmax) if copy: htilde = FrequencySeries(out, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False) return htilde
Shifts a frequency domain waveform in time. The waveform is assumed to be sampled at equal frequency intervals.
def parse(self, path): """Extracts a dictionary of values from the XML file at the specified path.""" #Load the template that will be used for parsing the values. expath, template, root = self._load_template(path) if expath is not None: values = template.parse(root) return (values, template)
Extracts a dictionary of values from the XML file at the specified path.
def add_dat_file(filename, settings, container=None, **kwargs): """ Read a RES2DINV-style file produced by the ABEM export program. """ # each type is read by a different function importers = { # general array type 11: _read_general_type, } file_type, content = _read_file(filename) if file_type not in importers: raise Exception( 'type of RES2DINV data file not recognized: {0}'.format(file_type) ) header, data = importers[file_type](content, settings) timestep = settings.get('timestep', 0) # add timestep column data['timestep'] = timestep if container is None: container = ERT(data) else: container.data = pd.concat((container.data, data)) return container
Read a RES2DINV-style file produced by the ABEM export program.
def period_break(dates, period): """ Returns the indices where the given period changes. Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : string Name of the period to monitor. """ current = getattr(dates, period) previous = getattr(dates - 1 * dates.freq, period) return np.nonzero(current - previous)[0]
Returns the indices where the given period changes. Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : string Name of the period to monitor.
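A small usage example with a monthly PeriodIndex, assuming period_break is in scope as defined above: the year changes once, at position 2.

import numpy as np   # needed by period_break itself
import pandas as pd

dates = pd.period_range('1999-11', periods=4, freq='M')   # Nov 1999 .. Feb 2000
print(period_break(dates, 'year'))    # -> [2]: the first period belonging to 2000
print(period_break(dates, 'month'))   # -> [0 1 2 3]: the month changes at every step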
def toDict(self): """ Return a dictionary with the DataFrame data. """ d = {} nindices = self.getNumIndices() for i in range(self.getNumRows()): row = list(self.getRowByIndex(i)) if nindices > 1: key = tuple(row[:nindices]) elif nindices == 1: key = row[0] else: key = None if len(row) - nindices == 0: d[key] = None elif len(row) - nindices == 1: d[key] = row[nindices] else: d[key] = tuple(row[nindices:]) return d
Return a dictionary with the DataFrame data.
async def move_to(self, channel: discord.VoiceChannel): """ Moves this player to a voice channel. Parameters ---------- channel : discord.VoiceChannel """ if channel.guild != self.channel.guild: raise TypeError("Cannot move to a different guild.") self.channel = channel await self.connect()
Moves this player to a voice channel. Parameters ---------- channel : discord.VoiceChannel
def do_login(session, for_what): """ Performs a login handshake with a user on the command-line. This method will handle all of the follow-up requests (e.g. capcha or two-factor). A login that requires two-factor looks like this:: >>> import mwapi.cli >>> import mwapi >>> mwapi.cli.do_login(mwapi.Session("https://en.wikipedia.org"), "English Wikipedia") Log into English Wikipedia Username: Halfak (WMF) Passord: Please enter verification code from your mobile app Token(OATHToken): 234567 :Parameters: session : :class:`mwapi.Session` A session object to use for login for_what : `str` A name to display to the use (for what they are logging into) """ # noqa username, password = request_username_password(for_what) try: session.login(username, password) except ClientInteractionRequest as cir: params = request_interaction(cir) session.continue_login(cir.login_token, **params)
Performs a login handshake with a user on the command-line. This method will handle all of the follow-up requests (e.g. captcha or two-factor). A login that requires two-factor looks like this:: >>> import mwapi.cli >>> import mwapi >>> mwapi.cli.do_login(mwapi.Session("https://en.wikipedia.org"), "English Wikipedia") Log into English Wikipedia Username: Halfak (WMF) Password: Please enter verification code from your mobile app Token(OATHToken): 234567 :Parameters: session : :class:`mwapi.Session` A session object to use for login for_what : `str` A name to display to the user (for what they are logging into)
def to_xdr_object(self): """Get an XDR object representation of this :class:`TransactionEnvelope`. """ tx = self.tx.to_xdr_object() return Xdr.types.TransactionEnvelope(tx, self.signatures)
Get an XDR object representation of this :class:`TransactionEnvelope`.
def set_tuning(self, tuning): """Set the tuning attribute on both the Track and its instrument (when available). Tuning should be a StringTuning or derivative object. """ if self.instrument: self.instrument.tuning = tuning self.tuning = tuning return self
Set the tuning attribute on both the Track and its instrument (when available). Tuning should be a StringTuning or derivative object.
def mysql_timestamp_converter(s): """Convert a MySQL TIMESTAMP to a Timestamp object.""" # MySQL>4.1 returns TIMESTAMP in the same format as DATETIME if s[4] == '-': return DateTime_or_None(s) s = s + "0"*(14-len(s)) # padding parts = map(int, filter(None, (s[:4],s[4:6],s[6:8], s[8:10],s[10:12],s[12:14]))) try: return Timestamp(*parts) except (SystemExit, KeyboardInterrupt): raise except: return None
Convert a MySQL TIMESTAMP to a Timestamp object.
def fillna(self, value=None, method=None, limit=None): """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series If a scalar value is passed it is used to fill all missing values. Alternatively, a Series or dict can be used to fill in different values for each index. The value should not be a list. The value(s) passed should either be in the categories or should be NaN. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap limit : int, default None (Not implemented yet for Categorical!) If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Returns ------- filled : Categorical with NA/NaN filled """ value, method = validate_fillna_kwargs( value, method, validate_scalar_dict_value=False ) if value is None: value = np.nan if limit is not None: raise NotImplementedError("specifying a limit for fillna has not " "been implemented yet") codes = self._codes # pad / bfill if method is not None: values = self.to_dense().reshape(-1, len(self)) values = interpolate_2d(values, method, 0, None, value).astype(self.categories.dtype)[0] codes = _get_codes_for_values(values, self.categories) else: # If value is a dict or a Series (a dict value has already # been converted to a Series) if isinstance(value, ABCSeries): if not value[~value.isin(self.categories)].isna().all(): raise ValueError("fill value must be in categories") values_codes = _get_codes_for_values(value, self.categories) indexer = np.where(values_codes != -1) codes[indexer] = values_codes[values_codes != -1] # If value is not a dict or Series it should be a scalar elif is_hashable(value): if not isna(value) and value not in self.categories: raise ValueError("fill value must be in categories") mask = codes == -1 if mask.any(): codes = codes.copy() if isna(value): codes[mask] = -1 else: codes[mask] = self.categories.get_loc(value) else: raise TypeError('"value" parameter must be a scalar, dict ' 'or Series, but you passed a ' '"{0}"'.format(type(value).__name__)) return self._constructor(codes, dtype=self.dtype, fastpath=True)
Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series If a scalar value is passed it is used to fill all missing values. Alternatively, a Series or dict can be used to fill in different values for each index. The value should not be a list. The value(s) passed should either be in the categories or should be NaN. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap limit : int, default None (Not implemented yet for Categorical!) If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Returns ------- filled : Categorical with NA/NaN filled
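Usage sketch through the public pandas API that this method backs: a scalar fill value must already be one of the categories.

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', np.nan, 'a'], categories=['a', 'b'])
print(cat.fillna('b'))      # -> ['a', 'b', 'b', 'a'] with categories ['a', 'b']
# cat.fillna('z')           # would raise ValueError: fill value must be in categories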
def export_opml(): """Export an OPML feed list""" with Database("feeds") as feeds: # Thanks to the canto project- used under the GPL print("""<opml version="1.0">""") print("""<body>""") # Accurate but slow. for name in list(feeds.keys()): kind = feedparser.parse(feeds[name]).version if kind[:4] == 'atom': t = 'pie' elif kind[:3] == 'rss': t = 'rss' print("""\t<outline text="%s" xmlUrl="%s" type="%s" />""" % (name, feeds[name], t)) print("""</body>""") print("""</opml>""")
Export an OPML feed list
def auth(username, password): """ Middleware implementing authentication via LOGIN. Most of the time this middleware needs to be placed *after* TLS. :param username: Username to login with. :param password: Password of the user. """ def middleware(conn): conn.login(username, password) return middleware
Middleware implementing authentication via LOGIN. Most of the time this middleware needs to be placed *after* TLS. :param username: Username to login with. :param password: Password of the user.
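A minimal sketch of applying the middleware to a plain smtplib connection; the server address and credentials are placeholders, and in practice a TLS middleware would run first, as the note above says.

import smtplib

conn = smtplib.SMTP('smtp.example.com', 587)         # placeholder server
conn.starttls()                                      # stand-in for a TLS middleware
auth('mailer@example.com', 'app-password')(conn)     # calls conn.login(...)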
def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass
A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class.
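Usage sketch; it assumes the decorator above and the module's PY2 flag are importable together.

@python_2_unicode_compatible
class Greeting(object):
    def __str__(self):
        return u'héllo'

g = Greeting()
# Python 2: unicode(g) == u'héllo', str(g) == the UTF-8 encoded bytes 'h\xc3\xa9llo'
# Python 3: str(g) == 'héllo' and the decorator is a no-op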
def openid_authorization_validator(self, request): """Additional validation when following the Authorization Code flow. """ request_info = super(HybridGrant, self).openid_authorization_validator(request) if not request_info: # returns immediately if OAuth2.0 return request_info # REQUIRED if the Response Type of the request is `code # id_token` or `code id_token token` and OPTIONAL when the # Response Type of the request is `code token`. It is a string # value used to associate a Client session with an ID Token, # and to mitigate replay attacks. The value is passed through # unmodified from the Authentication Request to the ID # Token. Sufficient entropy MUST be present in the `nonce` # values used to prevent attackers from guessing values. For # implementation notes, see Section 15.5.2. if request.response_type in ["code id_token", "code id_token token"]: if not request.nonce: raise InvalidRequestError( request=request, description='Request is missing mandatory nonce parameter.' ) return request_info
Additional validation when following the Authorization Code flow.
def upload(self, file_obj=None, file_path=None, name=None, data=None): """ Upload an image and return its path on the server. Either `file_obj` or `file_path` or `name` and `data` has to be specified. :param file_obj: A file object to upload :param file_path: A file path to upload from :param name: A file name for uploading :param data: The file content to upload :return: The path of the uploaded file on the server """ close = False if file_obj: file_name, content = os.path.basename(file_obj.name), file_obj elif file_path: file_name, content = os.path.basename(file_path), open(file_path, 'rb') close = True elif name and data: file_name, content = name, data else: raise GhostException( 400, 'Either `file_obj` or `file_path` or ' '`name` and `data` needs to be specified' ) try: content_type, _ = mimetypes.guess_type(file_name) file_arg = (file_name, content, content_type) response = self.execute_post('uploads/', files={'uploadimage': file_arg}) return response finally: if close: content.close()
Upload an image and return its path on the server. Either `file_obj` or `file_path` or `name` and `data` has to be specified. :param file_obj: A file object to upload :param file_path: A file path to upload from :param name: A file name for uploading :param data: The file content to upload :return: The path of the uploaded file on the server
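A hedged usage sketch; `client` stands for an instance of the Ghost API wrapper this method belongs to, and the file names/bytes are made up for illustration:

# From a path on disk (the file is opened and closed for you)
path_on_server = client.upload(file_path='cover.png')

# From in-memory data under an explicit name
path_on_server = client.upload(name='pixel.png', data=b'\x89PNG...')

# From an already-open file object (caller keeps ownership of the handle)
with open('cover.png', 'rb') as f:
    path_on_server = client.upload(file_obj=f)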
def define_mask_borders(image2d, sought_value, nadditional=0): """Generate mask avoiding undesired values at the borders. Set to True image borders with values equal to 'sought_value' Parameters ---------- image2d : numpy array Initial 2D image. sought_value : int, float, bool Pixel value that indicates missing data in the spectrum. nadditional : int Number of additional pixels to be masked at each border. Returns ------- mask2d : numpy array 2D mask. borders : list of tuples List of tuples (jmin, jmax) with the border limits (in array coordinates) found by find_pix_borders. """ # input image size naxis2, naxis1 = image2d.shape # initialize mask mask2d = np.zeros((naxis2, naxis1), dtype=bool) # initialize list to store borders borders = [] for i in range(naxis2): # only spectra with values different from 'sought_value' jborder_min, jborder_max = find_pix_borders( image2d[i, :], sought_value=sought_value ) borders.append((jborder_min, jborder_max)) if (jborder_min, jborder_max) != (-1, naxis1): if jborder_min != -1: j1 = 0 j2 = jborder_min + nadditional + 1 mask2d[i, j1:j2] = True if jborder_max != naxis1: j1 = jborder_max - nadditional j2 = naxis1 mask2d[i, j1:j2] = True return mask2d, borders
Generate mask avoiding undesired values at the borders. Set to True image borders with values equal to 'sought_value' Parameters ---------- image2d : numpy array Initial 2D image. sought_value : int, float, bool Pixel value that indicates missing data in the spectrum. nadditional : int Number of additional pixels to be masked at each border. Returns ------- mask2d : numpy array 2D mask. borders : list of tuples List of tuples (jmin, jmax) with the border limits (in array coordinates) found by find_pix_borders.
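A small usage sketch, assuming find_pix_borders from the same module is importable and returns the first and last columns holding valid data; the toy image marks missing data with zeros at both edges of every row:

import numpy as np

image2d = np.zeros((3, 10))
image2d[:, 2:8] = 1.0   # valid data only in columns 2..7

mask2d, borders = define_mask_borders(image2d, sought_value=0, nadditional=1)
# Under the stated assumption, borders -> [(2, 7)] * 3 and the mask flags the
# zero-valued borders plus one extra pixel on each side of every row.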
def _GetVisibilityPolicy(): """If a debugger configuration is found, create a visibility policy.""" try: visibility_config = yaml_data_visibility_config_reader.OpenAndRead() except yaml_data_visibility_config_reader.Error as err: return error_data_visibility_policy.ErrorDataVisibilityPolicy( 'Could not process debugger config: %s' % err) if visibility_config: return glob_data_visibility_policy.GlobDataVisibilityPolicy( visibility_config.blacklist_patterns, visibility_config.whitelist_patterns) return None
If a debugger configuration is found, create a visibility policy.
def to_binary(s, encoding='utf8'): """Portable cast function. In python 2 the ``str`` function which is used to coerce objects to bytes does not accept an encoding argument, whereas python 3's ``bytes`` function requires one. :param s: object to be converted to binary_type :return: binary_type instance, representing s. """ if PY3: # pragma: no cover return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding) return binary_type(s)
Portable cast function. In python 2 the ``str`` function which is used to coerce objects to bytes does not accept an encoding argument, whereas python 3's ``bytes`` function requires one. :param s: object to be converted to binary_type :return: binary_type instance, representing s.
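A quick illustration of the cast on Python 3 (PY3 and binary_type are the usual six-style aliases the surrounding module is assumed to define):

to_binary('caf\xe9')             # b'caf\xc3\xa9' -- UTF-8 by default
to_binary('caf\xe9', 'latin1')   # b'caf\xe9'
to_binary(b'raw-bytes')          # already bytes, returned unchanged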
def output(self): """ Produce a classic generator for this cell's final results. """ starters = self.finalize() try: yield from self._output(starters) finally: self.close()
Produce a classic generator for this cell's final results.
def raw_sensor_strings(self): """ Reads the raw strings from the kernel module sysfs interface :returns: raw strings containing all bytes from the sensor memory :rtype: str :raises NoSensorFoundError: if the sensor could not be found :raises SensorNotReadyError: if the sensor is not ready yet """ try: with open(self.sensorpath, "r") as f: data = f.readlines() except IOError: raise NoSensorFoundError(self.type_name, self.id) if data[0].strip()[-3:] != "YES": raise SensorNotReadyError(self) return data
Reads the raw strings from the kernel module sysfs interface :returns: raw strings containing all bytes from the sensor memory :rtype: str :raises NoSensorFoundError: if the sensor could not be found :raises SensorNotReadyError: if the sensor is not ready yet
def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance): '''Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types specified by key_type, value_type. ''' if not isinstance(obj, dict): raise_with_traceback(_type_mismatch_error(obj, dict)) if key_type is str: key_type = string_types if value_type is str: value_type = string_types for key, value in obj.items(): if key_type and not key_check(key, key_type): raise_with_traceback( CheckError( 'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format( key_type=repr(key_type), obj_repr=repr(key) ) ) ) if value_type and not value_check(value, value_type): raise_with_traceback( CheckError( 'Value in dictionary mismatches expected type for key {key}. Expected value ' 'of type {vtype}. Got value {value} of type {obj_type}.'.format( vtype=repr(value_type), obj_type=type(value), key=key, value=value ) ) ) return obj
Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types specified by key_type, value_type.
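A couple of hedged call sketches showing the pass/fail behaviour (CheckError is whatever exception class the surrounding check module defines):

_check_key_value_types({'alpha': 1, 'beta': 2}, key_type=str, value_type=int)
# -> returns the dict unchanged

_check_key_value_types({'alpha': 'oops'}, key_type=str, value_type=int)
# -> raises CheckError: value 'oops' mismatches the expected type for key 'alpha'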
def fit_predict(self, sequences, y=None): """Performs clustering on X and returns cluster labels. Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number of features. Returns ------- Y : list of ndarray, each of shape [sequence_length, ] Cluster labels """ if hasattr(super(MultiSequenceClusterMixin, self), 'fit_predict'): check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory) labels = super(MultiSequenceClusterMixin, self).fit_predict(sequences) else: self.fit(sequences) labels = self.predict(sequences) if not isinstance(labels, list): labels = self._split(labels) return labels
Performs clustering on X and returns cluster labels. Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number of features. Returns ------- Y : list of ndarray, each of shape [sequence_length, ] Cluster labels
def locale_escape(string, errors='replace'):
    '''
    Mangle non-supported characters, for savages with ascii terminals.
    '''
    encoding = locale.getpreferredencoding()
    # Decode with the same encoding used to encode; decoding the result as
    # UTF-8 can fail under non-ASCII, non-UTF-8 locales (e.g. latin-1).
    string = string.encode(encoding, errors).decode(encoding, errors)
    return string
Mangle non-supported characters, for savages with ascii terminals.
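A quick usage sketch; the result depends on the terminal's preferred encoding, so the outputs in the comments are only indicative:

print(locale_escape(u'na\xefve caf\xe9'))
# ASCII locale: 'na?ve caf?' (unsupported characters replaced)
# UTF-8 locale: 'naïve café' (unchanged)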
def update(self, τ: float = 1.0, update_indicators=True, dampen=False): """ Advance the model by one time step. """ for n in self.nodes(data=True): n[1]["next_state"] = n[1]["update_function"](n) for n in self.nodes(data=True): n[1]["rv"].dataset = n[1]["next_state"] for n in self.nodes(data=True): for i in range(self.res): self.s0[i][n[0]] = n[1]["rv"].dataset[i] if dampen: self.s0[i][f"∂({n[0]})/∂t"] = self.s0_original[ f"∂({n[0]})/∂t" ] * exp(-τ * self.t) if update_indicators: for indicator in n[1]["indicators"].values(): indicator.samples = np.random.normal( indicator.mean * np.array(n[1]["rv"].dataset), scale=0.01, ) self.t += self.Δt
Advance the model by one time step.
def generate_random_string(number_of_random_chars=8, character_set=string.ascii_letters): """ Generate a series of random characters. Kwargs: number_of_random_chars (int) : Number of characters long character_set (str): Specify a character set. Default is ASCII """ return u('').join(random.choice(character_set) for _ in range(number_of_random_chars))
Generate a series of random characters. Kwargs: number_of_random_chars (int) : Number of characters long character_set (str): Specify a character set. Default is ASCII
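A usage sketch generating tokens from different character sets (the seed call is only there to make the demo repeatable):

import random
import string

random.seed(0)                                    # only for a repeatable demo
print(generate_random_string())                   # 8 random ASCII letters
print(generate_random_string(12, string.digits))  # 12 random digits
print(generate_random_string(16, string.ascii_lowercase + string.digits))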
def get_portchannel_info_by_intf_output_lacp_partner_brcd_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf") config = get_portchannel_info_by_intf output = ET.SubElement(get_portchannel_info_by_intf, "output") lacp = ET.SubElement(output, "lacp") partner_brcd_state = ET.SubElement(lacp, "partner-brcd-state") partner_brcd_state.text = kwargs.pop('partner_brcd_state') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def cache(descriptor=None, *, store: IStore = None): ''' usage: ``` py @cache @property def name(self): pass ``` ''' if descriptor is None: return functools.partial(cache, store=store) hasattrs = { 'get': hasattr(descriptor, '__get__'), 'set': hasattr(descriptor, '__set__'), 'del': hasattr(descriptor, '__delete__') } descriptor_name = get_descriptor_name(descriptor) # pylint: disable=R0903,C0111 class CacheDescriptor(ICacheDescriptor): def __init__(self): if descriptor_name is not None: self.__name__ = descriptor_name cache_descriptor = CacheDescriptor() if store is None: store = FieldStore(cache_descriptor) elif not isinstance(store, IStore): raise TypeError(f'store must be a {IStore}.') if hasattrs['get']: def get(self, obj, objtype): if obj is None: return descriptor.__get__(obj, objtype) value = store.get(self, obj, defval=NOVALUE) if value is NOVALUE: value = descriptor.__get__(obj, objtype) store.set(self, obj, value) return value CacheDescriptor.__get__ = get if hasattrs['set']: def set(self, obj, value): store.pop(self, obj) descriptor.__set__(obj, value) CacheDescriptor.__set__ = set if hasattrs['del']: def delete(self, obj): store.pop(self, obj) descriptor.__delete__(obj) CacheDescriptor.__delete__ = delete return cache_descriptor
usage: ``` py @cache @property def name(self): pass ```
def _unpack(self, data): ''' Unpack a struct from bytes. For parser internal use. ''' #self._logger.log(logging.DEBUG, 'unpacking %r', self) current = self while current is not None: data = current._parser.unpack(data, current) last = current current = getattr(current, '_sub', None) _set(last, '_extra', data)
Unpack a struct from bytes. For parser internal use.
def validateDatetime(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, formats=('%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S', '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S', '%x %H:%M:%S', '%Y/%m/%d %H:%M', '%y/%m/%d %H:%M', '%m/%d/%Y %H:%M', '%m/%d/%y %H:%M', '%x %H:%M', '%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S', '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S', '%x %H:%M:%S'), excMsg=None): """Raises ValidationException if value is not a datetime formatted in one of the formats formats. Returns a datetime.datetime object of value. * value (str): The value being validated as a datetime. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid datetime. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDatetime('2018/10/31 12:00:01') datetime.datetime(2018, 10, 31, 12, 0, 1) >>> pysv.validateDatetime('10/31/2018 12:00:01') datetime.datetime(2018, 10, 31, 12, 0, 1) >>> pysv.validateDatetime('10/31/2018') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '10/31/2018' is not a valid date and time. """ # Reuse the logic in _validateToDateTimeFormat() for this function. try: return _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) except ValidationException: _raiseValidationException(_('%r is not a valid date and time.') % (_errstr(value)), excMsg)
Raises ValidationException if value is not a datetime formatted in one of the formats formats. Returns a datetime.datetime object of value. * value (str): The value being validated as a datetime. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid datetime. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateDatetime('2018/10/31 12:00:01') datetime.datetime(2018, 10, 31, 12, 0, 1) >>> pysv.validateDatetime('10/31/2018 12:00:01') datetime.datetime(2018, 10, 31, 12, 0, 1) >>> pysv.validateDatetime('10/31/2018') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '10/31/2018' is not a valid date and time.
def is_entailed_by(self, other): """ Returns True iff the values in this list can be entailed by the other list (ie, this list is a prefix of the other) """ other = ListCell.coerce(other) if other.size() < self.size(): # other is bigger, can't be entailed return False if self.value is None: # list is empty return True # see if any values in the shorter list are contradictory or # unequal for i, oval in enumerate(other.value): if i == len(self.value): break if hasattr(self.value[i], 'is_entailed_by') and \ not self.value[i].is_entailed_by(oval): # compare cells return False elif self.value[i] != oval: return False return True
Returns True iff the values in this list can be entailed by the other list (ie, this list is a prefix of the other)
def formatHeadings(self, text, isMain): """ This function accomplishes several tasks: 1) Auto-number headings if that option is enabled 2) Add an [edit] link to sections for logged in users who have enabled the option 3) Add a Table of contents on the top for users who have enabled the option 4) Auto-anchor headings It loops through all headlines, collects the necessary data, then splits up the string and re-inserts the newly formatted headlines. """ doNumberHeadings = False showEditLink = True # Can User Edit if text.find(u"__NOEDITSECTION__") != -1: showEditLink = False text = text.replace(u"__NOEDITSECTION__", u"") # Get all headlines for numbering them and adding funky stuff like [edit] # links - this is for later, but we need the number of headlines right now matches = _headerPat.findall(text) numMatches = len(matches) # if there are fewer than 4 headlines in the article, do not show TOC # unless it's been explicitly enabled. enoughToc = self.show_toc and (numMatches >= 4 or text.find(u"<!--MWTOC-->") != -1) # Allow user to stipulate that a page should have a "new section" # link added via __NEWSECTIONLINK__ showNewSection = False if text.find(u"__NEWSECTIONLINK__") != -1: showNewSection = True text = text.replace(u"__NEWSECTIONLINK__", u"") # if the string __FORCETOC__ (not case-sensitive) occurs in the HTML, # override above conditions and always show TOC above first header if text.find(u"__FORCETOC__") != -1: self.show_toc = True enoughToc = True text = text.replace(u"__FORCETOC__", u"") # Never ever show TOC if no headers if numMatches < 1: enoughToc = False # headline counter headlineCount = 0 sectionCount = 0 # headlineCount excluding template sections # Ugh .. the TOC should have neat indentation levels which can be # passed to the skin functions. These are determined here toc = [] head = {} sublevelCount = {} levelCount = {} toclevel = 0 level = 0 prevlevel = 0 toclevel = 0 prevtoclevel = 0 refers = {} refcount = {} wgMaxTocLevel = 5 for match in matches: headline = match[2] istemplate = False templatetitle = u'' templatesection = 0 numbering = [] m = _templateSectionPat.search(headline) if m: istemplate = True templatetitle = b64decode(m[0]) templatesection = 1 + int(b64decode(m[1])) headline = _templateSectionPat.sub(u'', headline) if toclevel: prevlevel = level prevtoclevel = toclevel level = matches[headlineCount][0] if doNumberHeadings or enoughToc: if level > prevlevel: toclevel += 1 sublevelCount[toclevel] = 0 if toclevel < wgMaxTocLevel: toc.append(u'\n<ul>') elif level < prevlevel and toclevel > 1: # Decrease TOC level, find level to jump to if toclevel == 2 and level < levelCount[1]: toclevel = 1 else: for i in range(toclevel, 0, -1): if levelCount[i] == level: # Found last matching level toclevel = i break elif levelCount[i] < level: toclevel = i + 1 break if toclevel < wgMaxTocLevel: toc.append(u"</li>\n") toc.append(u"</ul>\n</li>\n" * max(prevtoclevel - toclevel, 0)) else: if toclevel < wgMaxTocLevel: toc.append(u"</li>\n") levelCount[toclevel] = level # count number of headlines for each level sublevelCount[toclevel] += 1 for i in range(1, toclevel+1): if sublevelCount[i]: numbering.append(to_unicode(sublevelCount[i])) # The canonized header is a version of the header text safe to use for links # Avoid insertion of weird stuff like <math> by expanding the relevant sections canonized_headline = self.unstrip(headline) canonized_headline = self.unstripNoWiki(canonized_headline) # -- don't know what to do with this yet. # Remove link placeholders by the link text. 
# <!--LINK number--> # turns into # link text with suffix # $canonized_headline = preg_replace( '/<!--LINK ([0-9]*)-->/e', # "\$this->mLinkHolders['texts'][\$1]", # $canonized_headline ); # $canonized_headline = preg_replace( '/<!--IWLINK ([0-9]*)-->/e', # "\$this->mInterwikiLinkHolders['texts'][\$1]", # $canonized_headline ); # strip out HTML canonized_headline = _tagPat.sub(u'', canonized_headline) tocline = canonized_headline.strip() # Save headline for section edit hint before it's escaped headline_hint = tocline canonized_headline = self.escapeId(tocline) refers[headlineCount] = canonized_headline # count how many in assoc. array so we can track dupes in anchors if canonized_headline not in refers: refers[canonized_headline] = 1 else: refers[canonized_headline] += 1 refcount[headlineCount] = refers[canonized_headline] numbering = '.'.join(numbering) # Don't number the heading if it is the only one (looks silly) if doNumberHeadings and numMatches > 1: # the two are different if the line contains a link headline = numbering + u' ' + headline # Create the anchor for linking from the TOC to the section anchor = canonized_headline; if refcount[headlineCount] > 1: anchor += u'_' + unicode(refcount[headlineCount]) if enoughToc: toc.append(u'\n<li class="toclevel-') toc.append(to_unicode(toclevel)) toc.append(u'"><a href="#w_') toc.append(anchor) toc.append(u'"><span class="tocnumber">') toc.append(numbering) toc.append(u'</span> <span class="toctext">') toc.append(tocline) toc.append(u'</span></a>') # if showEditLink and (not istemplate or templatetitle != u""): # if not head[headlineCount]: # head[headlineCount] = u'' # # if istemplate: # head[headlineCount] += sk.editSectionLinkForOther(templatetile, templatesection) # else: # head[headlineCount] += sk.editSectionLink(mTitle, sectionCount+1, headline_hint) # give headline the correct <h#> tag if headlineCount not in head: head[headlineCount] = [] h = head[headlineCount] h.append(u'<h') h.append(to_unicode(level)) h.append(u' id="w_') h.append(anchor) h.append('">') h.append(matches[headlineCount][1].strip()) h.append(headline.strip()) h.append(u'</h') h.append(to_unicode(level)) h.append(u'>') headlineCount += 1 if not istemplate: sectionCount += 1 if enoughToc: if toclevel < wgMaxTocLevel: toc.append(u"</li>\n") toc.append(u"</ul>\n</li>\n" * max(0, toclevel - 1)) #TODO: use gettext #toc.insert(0, u'<div id="toc"><h2>' + _('Table of Contents') + '</h2>') toc.insert(0, u'<div id="toc"><h2>Table of Contents</h2>') toc.append(u'</ul>\n</div>') # split up and insert constructed headlines blocks = _headerPat.split(text) i = 0 len_blocks = len(blocks) forceTocPosition = text.find(u"<!--MWTOC-->") full = [] while i < len_blocks: j = i/4 full.append(blocks[i]) if enoughToc and not i and isMain and forceTocPosition == -1: full += toc toc = None if j in head and head[j]: full += head[j] head[j] = None i += 4 full = u''.join(full) if forceTocPosition != -1: return full.replace(u"<!--MWTOC-->", u''.join(toc), 1) else: return full
This function accomplishes several tasks: 1) Auto-number headings if that option is enabled 2) Add an [edit] link to sections for logged in users who have enabled the option 3) Add a Table of contents on the top for users who have enabled the option 4) Auto-anchor headings It loops through all headlines, collects the necessary data, then splits up the string and re-inserts the newly formatted headlines.
def list_projects(self): """Return a list of all followed projects.""" method = 'GET' url = '/projects?circle-token={token}'.format( token=self.client.api_token) json_data = self.client.request(method, url) return json_data
Return a list of all followed projects.
def _get_revision(self, revision):
    """
    Gets an ID revision given as a str. This will always return a full
    40-character revision number.

    :param revision: str or int or None
    """
    if self._empty:
        raise EmptyRepositoryError("There are no changesets yet")

    if revision in [-1, 'tip', None]:
        revision = 'tip'

    try:
        revision = hex(self._repo.lookup(revision))
    except (IndexError, ValueError, RepoLookupError, TypeError):
        raise ChangesetDoesNotExistError("Revision %s does not "
                                         "exist for this repository"
                                         % (revision))

    return revision

Gets an ID revision given as a str. This will always return a full 40-character revision number. :param revision: str or int or None
def _remove_lead_trail_false(bool_list):
    """Remove leading and trailing False values from a list"""
    # The internet can be a wonderful place...
    for i in (0, -1):
        while bool_list and not bool_list[i]:
            bool_list.pop(i)
    return bool_list

Remove leading and trailing False values from a list
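A tiny demonstration; note that the list is trimmed in place as well as returned:

flags = [False, False, True, False, True, False]
print(_remove_lead_trail_false(flags))   # -> [True, False, True]
print(flags)                             # -> [True, False, True]  (mutated in place)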
def _brzozowski_algebraic_method_init(self):
    """Initialize Brzozowski Algebraic Method"""
    for state_a in self.mma.states:
        # Initialize B
        if state_a.final:
            self.B[state_a.stateid] = self.epsilon
        else:
            self.B[state_a.stateid] = self.empty
        # Initialize A: this loop must stay nested inside the loop over
        # state_a, otherwise only the last state's row would ever be filled.
        for state_b in self.mma.states:
            self.A[state_a.stateid, state_b.stateid] = self.empty
            for arc in state_a.arcs:
                if arc.nextstate == state_b.stateid:
                    self.A[state_a.stateid, state_b.stateid] = \
                        self.mma.isyms.find(arc.ilabel)
Initialize Brzozowski Algebraic Method
def nearest_qmed_catchments(self, subject_catchment, limit=None, dist_limit=500): """ Return a list of catchments sorted by distance to `subject_catchment` **and filtered to only include catchments suitable for QMED analyses**. :param subject_catchment: catchment object to measure distances to :type subject_catchment: :class:`floodestimation.entities.Catchment` :param limit: maximum number of catchments to return. Default: `None` (returns all available catchments). :type limit: int :param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the maximum distance will increase computation time! :type dist_limit: float or int :return: list of catchments sorted by distance :rtype: list of :class:`floodestimation.entities.Catchment` """ dist_sq = Catchment.distance_to(subject_catchment).label('dist_sq') # Distance squared, calculated using SQL query = self.db_session.query(Catchment, dist_sq). \ join(Catchment.amax_records). \ join(Catchment.descriptors). \ filter(Catchment.id != subject_catchment.id, # Exclude subject catchment itself Catchment.is_suitable_for_qmed, # Only catchments suitable for QMED estimation Catchment.country == subject_catchment.country, # SQL dist method does not cover cross-boundary dist # Within the distance limit dist_sq <= dist_limit ** 2). \ group_by(Catchment, Descriptors.centroid_ngr_x, Descriptors.centroid_ngr_y). \ order_by(dist_sq). \ having(func.count(AmaxRecord.catchment_id) >= 10) # At least 10 AMAX records if limit: rows = query[0:limit] # Each row is tuple of (catchment, distance squared) else: rows = query.all() # Add real `dist` attribute to catchment list using previously calculated SQL dist squared catchments = [] for row in rows: catchment = row[0] catchment.dist = sqrt(row[1]) catchments.append(catchment) return catchments
Return a list of catchments sorted by distance to `subject_catchment` **and filtered to only include catchments suitable for QMED analyses**. :param subject_catchment: catchment object to measure distances to :type subject_catchment: :class:`floodestimation.entities.Catchment` :param limit: maximum number of catchments to return. Default: `None` (returns all available catchments). :type limit: int :param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the maximum distance will increase computation time! :type dist_limit: float or int :return: list of catchments sorted by distance :rtype: list of :class:`floodestimation.entities.Catchment`
def data_array_from_data_iterable(data_iterable): '''Convert data iterable to raw data numpy array. Parameters ---------- data_iterable : iterable Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status). Returns ------- data_array : numpy.array concatenated data array ''' try: data_array = np.concatenate([item[0] for item in data_iterable]) except ValueError: # length is 0 data_array = np.empty(0, dtype=np.uint32) return data_array
Convert data iterable to raw data numpy array. Parameters ---------- data_iterable : iterable Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status). Returns ------- data_array : numpy.array concatenated data array
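A usage sketch with two made-up readout tuples; only the first element of each tuple (the raw data array) is used:

import numpy as np

data_iterable = [
    (np.array([1, 2, 3], dtype=np.uint32), 0.0, 0.1, 0),
    (np.array([4, 5], dtype=np.uint32), 0.1, 0.2, 0),
]
print(data_array_from_data_iterable(data_iterable))  # -> [1 2 3 4 5]
print(data_array_from_data_iterable([]))             # -> array([], dtype=uint32)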
def init_dataset_prepare_args(self, parser): '''Only invoked conditionally if subcommand is 'prepare' ''' parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH, help='the path to the configuration file to use -- ./config.yaml by default') parser.add_argument('-c', '--corpus-name', help='the name of the corpus to process') parser.add_argument('-n', '--dataset-name', help='the name to assign the newly processed dataset')
Only invoked conditionally if subcommand is 'prepare'
def check_specs(specs, renamings, types): ''' Does nothing but raising PythranSyntaxError if specs are incompatible with the actual code ''' from pythran.types.tog import unify, clone, tr from pythran.types.tog import Function, TypeVariable, InferenceError functions = {renamings.get(k, k): v for k, v in specs.functions.items()} for fname, signatures in functions.items(): ftype = types[fname] for signature in signatures: sig_type = Function([tr(p) for p in signature], TypeVariable()) try: unify(clone(sig_type), clone(ftype)) except InferenceError: raise PythranSyntaxError( "Specification for `{}` does not match inferred type:\n" "expected `{}`\n" "got `Callable[[{}], ...]`".format( fname, ftype, ", ".join(map(str, sig_type.types[:-1]))) )
Does nothing but raising PythranSyntaxError if specs are incompatible with the actual code
def send_to_device(self, event_type, messages, txn_id=None): """Sends send-to-device events to a set of client devices. Args: event_type (str): The type of event to send. messages (dict): The messages to send. Format should be <user_id>: {<device_id>: <event_content>}. The device ID may also be '*', meaning all known devices for the user. txn_id (str): Optional. The transaction ID for this event, will be generated automatically otherwise. """ txn_id = txn_id if txn_id else self._make_txn_id() return self._send( "PUT", "/sendToDevice/{}/{}".format(event_type, txn_id), content={"messages": messages} )
Sends send-to-device events to a set of client devices. Args: event_type (str): The type of event to send. messages (dict): The messages to send. Format should be <user_id>: {<device_id>: <event_content>}. The device ID may also be '*', meaning all known devices for the user. txn_id (str): Optional. The transaction ID for this event, will be generated automatically otherwise.
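A hedged call sketch; `api` stands for an instance of the Matrix client-server API wrapper this method is defined on, and the event type and content are illustrative only:

messages = {
    "@alice:example.org": {
        "*": {"body": "hello to every device"},   # '*' = all known devices
    },
}
api.send_to_device("com.example.custom_event", messages)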
def render_code(self):
    """ Try to load the previous code (if we had a crash or something).
    I should allow saving.
    """
    tmp_dir = os.environ.get('TMP', '')
    view_code = os.path.join(tmp_dir, 'view.enaml')
    if os.path.exists(view_code):
        try:
            with open(view_code) as f:
                return f.read()
        except Exception:
            # Fall back to the default code if the saved file is unreadable.
            pass
    return DEFAULT_CODE

Try to load the previous code (if we had a crash or something). I should allow saving.
def isnap(self): """Snapshot index corresponding to time step. It is set to None if no snapshot exists for the time step. """ if self._isnap is UNDETERMINED: istep = None isnap = -1 # could be more efficient if do 0 and -1 then bisection # (but loose intermediate <- would probably use too much # memory for what it's worth if search algo is efficient) while (istep is None or istep < self.istep) and isnap < 99999: isnap += 1 istep = self.sdat.snaps[isnap].istep self.sdat.snaps.bind(isnap, istep) # all intermediate istep could have their ._isnap to None if istep != self.istep: self._isnap = None return self._isnap
Snapshot index corresponding to time step. It is set to None if no snapshot exists for the time step.
def _exception_free_callback(self, callback, *args, **kwargs):
    """ A wrapper that swallows any exception raised by a hook """
    try:
        return callback(*args, **kwargs)
    except Exception:
        self._logger.exception("An exception occurred while calling a hook!")
        return None

A wrapper that swallows any exception raised by a hook
def backup(self): """ Backup the developer state of `output/` in order to make it restorable and portable for user. """ # We set the current output directory path. output_path = self.base + PyFunceble.OUTPUTS["parent_directory"] # We initiate the structure base. result = {PyFunceble.OUTPUTS["parent_directory"]: {}} for root, _, files in PyFunceble.walk(output_path): # We loop through the current output directory structure. # We get the currently read directory name. directories = Directory(root.split(output_path)[1]).fix_path() # We initiate a local variable which will get the structure of the subdirectory. local_result = result[PyFunceble.OUTPUTS["parent_directory"]] for file in files: # We loop through the list of files. # We construct the file path. file_path = root + PyFunceble.directory_separator + file # We get the hash of the file. file_hash = Hash(file_path, "sha512", True).get() # We convert the file content to a list. lines_in_list = [line.rstrip("\n") for line in open(file_path)] # We convert the file content into a more flat format. # We use `@@@` as glue and implicitly replacement for `\n`. formatted_content = "@@@".join(lines_in_list) # We update the local result (and implicitly the global result) # with the files and directory informations/structure. local_result = local_result.setdefault( directories, {file: {"sha512": file_hash, "content": formatted_content}}, ) # We finally save the directory structure into the production file. Dict(result).to_json(self.base + "dir_structure_production.json")
Backup the developer state of `output/` in order to make it restorable and portable for user.
def service_post_save(instance, *args, **kwargs): """ Used to do a service full check when saving it. """ # check service if instance.is_monitored and settings.REGISTRY_SKIP_CELERY: check_service(instance.id) elif instance.is_monitored: check_service.delay(instance.id)
Used to do a service full check when saving it.
def uuid3(namespace, name): """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" import md5 hash = md5.md5(namespace.bytes + name).digest() return UUID(bytes=hash[:16], version=3)
Generate a UUID from the MD5 hash of a namespace UUID and a name.
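For comparison, the modern standard-library equivalent of this Python 2-era helper; it uses the same MD5-based algorithm, so the result is deterministic for a given namespace and name:

import uuid

u = uuid.uuid3(uuid.NAMESPACE_DNS, 'example.com')
print(u)          # the same namespace/name pair always yields the same UUID
print(u.version)  # 3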
def getUTCDatetimeDOY(days=0, hours=0, minutes=0, seconds=0): """getUTCDatetimeDOY -> datetime Returns the UTC current datetime with the input timedelta arguments (days, hours, minutes, seconds) added to current date. Returns ISO-8601 datetime format for day of year: YYYY-DDDTHH:mm:ssZ """ return (datetime.datetime.utcnow() + datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)).strftime(DOY_Format)
getUTCDatetimeDOY -> datetime Returns the UTC current datetime with the input timedelta arguments (days, hours, minutes, seconds) added to current date. Returns ISO-8601 datetime format for day of year: YYYY-DDDTHH:mm:ssZ
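A usage sketch; DOY_Format is defined elsewhere in the module, and based on the docstring it presumably expands to a day-of-year strftime pattern along the lines of the assumed one below:

import datetime

ASSUMED_DOY_FORMAT = '%Y-%jT%H:%M:%SZ'   # assumption: YYYY-DDDTHH:mm:ssZ

print(getUTCDatetimeDOY())        # e.g. '2024-103T14:05:09Z' (current UTC time)
print(getUTCDatetimeDOY(days=2))  # same format, two days from now
# Hand-rolled equivalent under the assumed format:
print(datetime.datetime.utcnow().strftime(ASSUMED_DOY_FORMAT))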
def start(self): """Initializes the bot, plugins, and everything.""" self.bot_start_time = datetime.now() self.webserver = Webserver(self.config['webserver']['host'], self.config['webserver']['port']) self.plugins.load() self.plugins.load_state() self._find_event_handlers() self.sc = ThreadedSlackClient(self.config['slack_token']) self.always_send_dm = ['_unauthorized_'] if 'always_send_dm' in self.config: self.always_send_dm.extend(map(lambda x: '!' + x, self.config['always_send_dm'])) # Rocket is very noisy at debug logging.getLogger('Rocket.Errors.ThreadPool').setLevel(logging.INFO) self.is_setup = True if self.test_mode: self.metrics['startup_time'] = (datetime.now() - self.bot_start_time).total_seconds() * 1000.0
Initializes the bot, plugins, and everything.
def nsx_controller_connection_addr_method(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(nsx_controller, "name") name_key.text = kwargs.pop('name') connection_addr = ET.SubElement(nsx_controller, "connection-addr") method = ET.SubElement(connection_addr, "method") method.text = kwargs.pop('method') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def status(config, group, accounts=(), region=None): """report current export state status""" config = validate.callback(config) destination = config.get('destination') client = boto3.Session().client('s3') for account in config.get('accounts', ()): if accounts and account['name'] not in accounts: continue session = get_session(account['role'], region) account_id = session.client('sts').get_caller_identity()['Account'] prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id prefix = "%s/flow-log" % prefix role = account.pop('role') if isinstance(role, six.string_types): account['account_id'] = role.split(':')[4] else: account['account_id'] = role[-1].split(':')[4] account.pop('groups') try: tag_set = client.get_object_tagging( Bucket=destination['bucket'], Key=prefix).get('TagSet', []) except ClientError: account['export'] = 'missing' continue tags = {t['Key']: t['Value'] for t in tag_set} if 'LastExport' not in tags: account['export'] = 'empty' else: last_export = parse(tags['LastExport']) account['export'] = last_export.strftime('%Y/%m/%d') accounts = [a for a in config.get('accounts') if a in accounts or not accounts] accounts.sort(key=operator.itemgetter('export'), reverse=True) print(tabulate(accounts, headers='keys'))
report current export state status
def cast(self, value): """Cast a value to the type required by the option, if one is set. This is used to cast the string values gathered from environment variable into their required type. Args: value: The value to cast. Returns: The value casted to the expected type for the option. """ # if there is no type set for the option, return the given # value unchanged. if self.type is None: return value # cast directly if self.type in (str, int, float): try: return self.type(value) except Exception as e: raise errors.BisonError( 'Failed to cast {} to {}'.format(value, self.type) ) from e # for bool, can't cast a string, since a string is truthy, # so we need to check the value. elif self.type == bool: return value.lower() == 'true' # the option type is currently not supported else: raise errors.BisonError('Unsupported type for casting: {}'.format(self.type))
Cast a value to the type required by the option, if one is set. This is used to cast the string values gathered from environment variable into their required type. Args: value: The value to cast. Returns: The value casted to the expected type for the option.
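The bool branch is the subtle part: casting a non-empty string with bool() is always truthy, which is why the method compares against the literal string 'true' instead. A quick standalone illustration of the pitfall:

bool('false')              # True  -- any non-empty string is truthy
bool('')                   # False -- only the empty string is falsey
'False'.lower() == 'true'  # False -- the comparison the bool branch relies on
'TRUE'.lower() == 'true'   # True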
def declare(self, name, memory_type='BIT', memory_size=1, shared_region=None, offsets=None): """DECLARE a quil variable This adds the declaration to the current program and returns a MemoryReference to the base (offset = 0) of the declared memory. .. note:: This function returns a MemoryReference and cannot be chained like some of the other Program methods. Consider using ``inst(DECLARE(...))`` if you would like to chain methods, but please be aware that you must create your own MemoryReferences later on. :param name: Name of the declared variable :param memory_type: Type of the declared memory: 'BIT', 'REAL', 'OCTET' or 'INTEGER' :param memory_size: Number of array elements in the declared memory. :param shared_region: You can declare a variable that shares its underlying memory with another region. This allows aliasing. For example, you can interpret an array of measured bits as an integer. :param offsets: If you are using ``shared_region``, this allows you to share only a part of the parent region. The offset is given by an array type and the number of elements of that type. For example, ``DECLARE target-bit BIT SHARING real-region OFFSET 1 REAL 4 BIT`` will let you use target-bit to poke into the fourth bit of the second real from the leading edge of real-region. :return: a MemoryReference to the start of the declared memory region, ie a memory reference to ``name[0]``. """ self.inst(Declare(name=name, memory_type=memory_type, memory_size=memory_size, shared_region=shared_region, offsets=offsets)) return MemoryReference(name=name, declared_size=memory_size)
DECLARE a quil variable This adds the declaration to the current program and returns a MemoryReference to the base (offset = 0) of the declared memory. .. note:: This function returns a MemoryReference and cannot be chained like some of the other Program methods. Consider using ``inst(DECLARE(...))`` if you would like to chain methods, but please be aware that you must create your own MemoryReferences later on. :param name: Name of the declared variable :param memory_type: Type of the declared memory: 'BIT', 'REAL', 'OCTET' or 'INTEGER' :param memory_size: Number of array elements in the declared memory. :param shared_region: You can declare a variable that shares its underlying memory with another region. This allows aliasing. For example, you can interpret an array of measured bits as an integer. :param offsets: If you are using ``shared_region``, this allows you to share only a part of the parent region. The offset is given by an array type and the number of elements of that type. For example, ``DECLARE target-bit BIT SHARING real-region OFFSET 1 REAL 4 BIT`` will let you use target-bit to poke into the fourth bit of the second real from the leading edge of real-region. :return: a MemoryReference to the start of the declared memory region, ie a memory reference to ``name[0]``.
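A hedged pyquil usage sketch (assuming a pyquil 2.x-style install, where H and MEASURE live in pyquil.gates):

from pyquil import Program
from pyquil.gates import H, MEASURE

p = Program()
ro = p.declare('ro', 'BIT', 2)   # MemoryReference to ro[0]
p += H(0)
p += MEASURE(0, ro[0])           # index into the declared region
p += MEASURE(1, ro[1])
print(p)                         # the printed Quil includes 'DECLARE ro BIT[2]'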
def get_log_entries_by_search(self, log_entry_query, log_entry_search): """Pass through to provider LogEntrySearchSession.get_log_entries_by_search""" # Implemented from azosid template for - # osid.resource.ResourceSearchSession.get_resources_by_search_template if not self._can('search'): raise PermissionDenied() return self._provider_session.get_log_entries_by_search(log_entry_query, log_entry_search)
Pass through to provider LogEntrySearchSession.get_log_entries_by_search
def create_hdf_file(self):
    """
    Open the HDF5 file (in write mode, or append mode when overwriting is
    disabled and the file already exists) and select or create the base group.

    Sets ``self._hdf_file`` and ``self._group``; returns None.
    """
    mode = 'w'
    if not self._overwrite and os.path.exists(self._fname):
        mode = 'a'

    self._hdf_file = h5py.File(self._fname, mode)

    if self._hdf_basepath == '/':
        self._group = self._hdf_file['/']
    else:
        self._group = self._hdf_file.create_group(self._hdf_basepath)

Open the HDF5 file (in write mode, or append mode when overwriting is disabled and the file already exists) and select or create the base group. Sets ``self._hdf_file`` and ``self._group``; returns None.