code (string, lengths 51 to 2.38k)
docstring (string, lengths 4 to 15.2k)
def _set_iroot_via_xroot(self, xroot):
    if self._adata.shape[1] != xroot.size:
        raise ValueError(
            'The root vector you provided does not have the '
            'correct dimension.')
    dsqroot = 1e10
    iroot = 0
    for i in range(self._adata.shape[0]):
        diff = self._adata.X[i, :] - xroot
        dsq = diff.dot(diff)
        if dsq < dsqroot:
            dsqroot = dsq
            iroot = i
            if np.sqrt(dsqroot) < 1e-10:
                break
    logg.msg('setting root index to', iroot, v=4)
    if self.iroot is not None and iroot != self.iroot:
        logg.warn('Changing index of iroot from {} to {}.'.format(self.iroot, iroot))
    self.iroot = iroot
Determine the index of the root cell. Given an expression vector, find the observation index that is closest to this vector. Parameters ---------- xroot : np.ndarray Vector that marks the root cell, the vector storing the initial condition, only relevant for computing pseudotime.
def create_baseline(tag="baseline", config='root'):
    return __salt__['snapper.create_snapshot'](config=config,
                                               snapshot_type='single',
                                               description="baseline snapshot",
                                               cleanup_algorithm="number",
                                               userdata={"baseline_tag": tag})
Creates a snapshot marked as baseline

tag
    Tag name for the baseline
config
    Configuration name.

CLI Example:

.. code-block:: bash

    salt '*' snapper.create_baseline
    salt '*' snapper.create_baseline my_custom_baseline
def lookup_rest_method(self, orig_request):
    method_name, method, params = self.config_manager.lookup_rest_method(
        orig_request.path, orig_request.request_uri, orig_request.http_method)
    orig_request.method_name = method_name
    return method, params
Looks up and returns rest method for the currently-pending request. Args: orig_request: An ApiRequest, the original request from the user. Returns: A tuple of (method descriptor, parameters), or (None, None) if no method was found for the current request.
def schedule_tasks(self):
    url = 'api/v6/releases/%d/schedule-tasks' % self.id
    tasks = yield self.connection._get(url)
    defer.returnValue(munchify(tasks))
Get all the tasks for a release. :param release_id: int, release id number. :returns: deferred that when fired returns a list of Munch (dict-like) objects representing all tasks.
def _ParseFile(self, file_obj, line_parser):
    lines = [
        l.strip() for l in utils.ReadFileBytesAsUnicode(file_obj).splitlines()
    ]
    try:
        for index, line in enumerate(lines):
            if line:
                line_parser(line)
    except (IndexError, KeyError) as e:
        raise parser.ParseError("Invalid file at line %d: %s" % (index + 1, e))
Process a file line by line. Args: file_obj: The file to parse. line_parser: The parser method used to process and store line content. Raises: parser.ParseError if the parser is unable to process the line.
def replace_key(self, key, new_key):
    heap = self._heap
    position = self._position
    if new_key in self:
        raise KeyError('%s is already in the queue' % repr(new_key))
    pos = position.pop(key)
    position[new_key] = pos
    heap[pos].key = new_key
Replace the key of an existing heap node in place. Raises ``KeyError`` if the key to replace does not exist or if the new key is already in the pqdict.
def add_profile_variants(self, profile_variants):
    results = self.db.profile_variant.insert_many(profile_variants)
    return results
Add several variants to the profile_variant collection in the database Args: profile_variants(list(models.ProfileVariant))
def start(self, historics_id):
    return self.request.post('start', data=dict(id=historics_id))
Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def submitQuest(self):
    form = pg.form(action="kitchen2.phtml")
    pg = form.submit()
    if "Woohoo" in pg.content:
        try:
            self.prize = pg.find(text="The Chef waves his hands, and you may collect your prize...").parent.parent.find_all("b")[-1].text
        except Exception:
            logging.getLogger("neolib.quest").exception("Failed to parse kitchen quest prize", {'pg': pg})
            raise parseException
        return True
    else:
        logging.getLogger("neolib.quest").info("Failed to complete kitchen quest", {'pg': pg})
        return False
Submits the active quest and returns the result. Returns bool - True if successful, otherwise False.
def execute_add(args, root_dir=None):
    command = ' '.join(args['command'])
    instruction = {
        'command': command,
        'path': os.getcwd()
    }
    print_command_factory('add')(instruction, root_dir)
Add a new command to the daemon queue. Args: args['command'] (list(str)): The actual programm call. Something like ['ls', '-a'] or ['ls -al'] root_dir (string): The path to the root directory the daemon is running in.
def raw_sign(message, secret):
    digest = hmac.new(secret, message, hashlib.sha256).digest()
    return base64.b64encode(digest)
Sign a message.
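A minimal usage sketch of the same pattern, assuming both secret and message are passed as bytes (hmac, hashlib and base64 are standard library):

import base64
import hashlib
import hmac

def raw_sign(message, secret):
    # HMAC-SHA256 digest, base64-encoded for transport
    digest = hmac.new(secret, message, hashlib.sha256).digest()
    return base64.b64encode(digest)

signature = raw_sign(b"payload-to-sign", b"shared-secret")
print(signature)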
def key(username, key, all):
    if username and username not in current_app.config['ADMIN_USERS']:
        raise click.UsageError('User {} not an admin'.format(username))

    def create_key(admin, key):
        key = ApiKey(
            user=admin,
            key=key,
            scopes=[Scope.admin, Scope.write, Scope.read],
            text='Admin key created by alertad script',
            expire_time=None
        )
        try:
            db.get_db()
            key = key.create()
        except Exception as e:
            click.echo('ERROR: {}'.format(e))
        else:
            click.echo('{} {}'.format(key.key, key.user))

    if all:
        for admin in current_app.config['ADMIN_USERS']:
            create_key(admin, key)
    elif username:
        create_key(username, key)
    else:
        raise click.UsageError("Must set '--username' or use '--all'")
Create an admin API key.
def getDataAtRva(self, rva, size):
    return self.getDataAtOffset(self.getOffsetFromRva(rva), size)
Gets binary data at a given RVA. @type rva: int @param rva: The RVA to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given RVA.
def after_processing(eng, objects):
    super(InvenioProcessingFactory, InvenioProcessingFactory)\
        .after_processing(eng, objects)
    if eng.has_completed:
        eng.save(WorkflowStatus.COMPLETED)
    else:
        eng.save(WorkflowStatus.HALTED)
    db.session.commit()
Process to update status.
def wrap_with_monitor(env, video_dir):
    env = ExtendToEvenDimentions(env)
    env = RenderObservations(env)
    env = gym.wrappers.Monitor(env, video_dir, force=True,
                               video_callable=lambda idx: True,
                               write_upon_reset=True)
    return env
Wrap environment with gym.Monitor. Video recording provided by Monitor requires 1) both height and width of observation to be even numbers. 2) rendering of environment Args: env: environment. video_dir: video directory. Returns: wrapped environment.
def findExtNum(self, extname=None, extver=1):
    extnum = None
    extname = extname.upper()
    if not self._isSimpleFits:
        for ext in self._image:
            if (hasattr(ext, '_extension') and
                    'IMAGE' in ext._extension and
                    (ext.extname == extname) and
                    (ext.extver == extver)):
                extnum = ext.extnum
    else:
        log.info("Image is simple fits")
    return extnum
Find the extension number of the give extname and extver.
def set_gl_transform(self):
    tangent = np.tan(self.fov_deg / 2.0 / 180.0 * np.pi)
    vport_radius = self.near_plane * tangent
    if self.vport_wd_px < self.vport_ht_px:
        vport_wd = 2.0 * vport_radius
        vport_ht = vport_wd * self.vport_ht_px / float(self.vport_wd_px)
    else:
        vport_ht = 2.0 * vport_radius
        vport_wd = vport_ht * self.vport_wd_px / float(self.vport_ht_px)
    gl.glFrustum(
        -0.5 * vport_wd, 0.5 * vport_wd,
        -0.5 * vport_ht, 0.5 * vport_ht,
        self.near_plane, self.far_plane
    )
    M = Matrix4x4.look_at(self.position, self.target, self.up, False)
    gl.glMultMatrixf(M.get())
This side effects the OpenGL context to set the view to match the camera.
def validate_settings(settings):
    if not (settings.STORMPATH_ID and settings.STORMPATH_SECRET):
        raise ImproperlyConfigured('Both STORMPATH_ID and STORMPATH_SECRET must be specified in settings.py.')
    if not settings.STORMPATH_APPLICATION:
        raise ImproperlyConfigured('STORMPATH_APPLICATION must be specified in settings.py.')
Ensure all user-supplied settings exist, or throw a useful error message. :param obj settings: The Django settings object.
def _generate_overview_note(pass_count, only_warning_count, error_count, total_count):
    note_html = ['<div class="progress">']
    pbars = [
        [float(error_count), 'danger', 'had errors'],
        [float(only_warning_count), 'warning', 'had warnings'],
        [float(pass_count), 'success', 'passed']
    ]
    for b in pbars:
        if b[0]:
            note_html.append(
                '<div class="progress-bar progress-bar-{pbcol}" style="width: {pct}%" data-toggle="tooltip" title="{count} {sample} {txt}">{count}</div>'.format(
                    pbcol=b[1],
                    count=int(b[0]),
                    pct=(b[0] / float(total_count)) * 100.0,
                    txt=b[2],
                    sample='samples' if b[0] > 1 else 'sample'
                )
            )
    note_html.append('</div>')
    return "\n".join(note_html)
Generates and returns the HTML note that provides a summary of validation status.
def _hashfile(self, filename, blocksize=65536):
    logger.debug("Hashing file %s" % (filename))
    hasher = hashlib.sha256()
    afile = open(filename, 'rb')
    buf = afile.read(blocksize)
    while len(buf) > 0:
        hasher.update(buf)
        buf = afile.read(blocksize)
    return hasher.hexdigest()
Hashes the file and returns hash
def load_from_string(self, content, container, **kwargs):
    _not_implemented(self, content, container, **kwargs)
Load config from given string 'content'. :param content: Config content string :param container: callble to make a container object later :param kwargs: optional keyword parameters to be sanitized :: dict :return: Dict-like object holding config parameters
def _format_job_instance(job):
    if not job:
        ret = {'Error': 'Cannot contact returner or no job with this jid'}
        return ret

    ret = {'Function': job.get('fun', 'unknown-function'),
           'Arguments': list(job.get('arg', [])),
           'Target': job.get('tgt', 'unknown-target'),
           'Target-type': job.get('tgt_type', 'list'),
           'User': job.get('user', 'root')}

    if 'metadata' in job:
        ret['Metadata'] = job.get('metadata', {})
    else:
        if 'kwargs' in job:
            if 'metadata' in job['kwargs']:
                ret['Metadata'] = job['kwargs'].get('metadata', {})

    if 'Minions' in job:
        ret['Minions'] = job['Minions']
    return ret
Helper to format a job instance
async def terminateInstance(self, *args, **kwargs):
    return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
Terminate an instance Terminate an instance in a specified region This method is ``experimental``
def iterconnections(self):
    return itertools.chain(
        self.secureConnectionCache.cachedConnections.itervalues(),
        iter(self.subConnections),
        (self.dispatcher or ()) and self.dispatcher.iterconnections())
Iterator of all connections associated with this service, whether cached or not. For testing purposes only.
async def prover_search_credentials(wallet_handle: int, query_json: str) -> (int, int):
    logger = logging.getLogger(__name__)
    logger.debug("prover_search_credentials: >>> wallet_handle: %r, query_json: %r",
                 wallet_handle,
                 query_json)

    if not hasattr(prover_search_credentials, "cb"):
        logger.debug("prover_search_credentials: Creating callback")
        prover_search_credentials.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32, c_uint))

    c_wallet_handle = c_int32(wallet_handle)
    c_query_json = c_char_p(query_json.encode('utf-8'))

    res = await do_call('indy_prover_search_credentials',
                        c_wallet_handle,
                        c_query_json,
                        prover_search_credentials.cb)

    logger.debug("prover_search_credentials: <<< res: %r", res)
    return res
Search for credentials stored in wallet. Credentials can be filtered by tags created during saving of credential. Instead of immediately returning of fetched credentials this call returns search_handle that can be used later to fetch records by small batches (with prover_credentials_search_fetch_records). :param wallet_handle: wallet handler (created by open_wallet). :param query_json: wql style filter for credentials searching based on tags. where wql query: indy-sdk/docs/design/011-wallet-query-language/README.md :return: search_handle: Search handle that can be used later to fetch records by small batches (with prover_credentials_search_fetch_records) total_count: Total count of records
def getobjectswithnode(idf, nodekeys, nodename):
    keys = nodekeys
    listofidfobjects = (idf.idfobjects[key.upper()]
                        for key in keys if idf.idfobjects[key.upper()])
    idfobjects = [idfobj
                  for idfobjs in listofidfobjects
                  for idfobj in idfobjs]
    objwithnodes = []
    for obj in idfobjects:
        values = obj.fieldvalues
        fdnames = obj.fieldnames
        for value, fdname in zip(values, fdnames):
            if fdname.endswith('Node_Name'):
                if value == nodename:
                    objwithnodes.append(obj)
                    break
    return objwithnodes
return all objects that mention this node name
def alchemyencoder(obj):
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, decimal.Decimal):
        return float(obj)
JSON encoder function for SQLAlchemy special classes.
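A hedged usage sketch: the function is intended as the default hook of json.dumps, so dates and decimals coming back from SQLAlchemy rows serialize cleanly; the sample row dict below is hypothetical:

import datetime
import decimal
import json

row = {"created": datetime.date(2020, 1, 1), "price": decimal.Decimal("9.99")}
# alchemyencoder is called for every object json.dumps cannot serialize natively
print(json.dumps(row, default=alchemyencoder))  # {"created": "2020-01-01", "price": 9.99}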
def exists(self, query, **args):
    return bool(self.find(query, **args).limit(1).count())
Returns True if the search matches at least one document
def start_optimisation(self, rounds: int, max_angle: float,
                       max_distance: float, temp: float=298.15,
                       stop_when=None, verbose=None):
    self._generate_initial_score()
    self._mmc_loop(rounds, max_angle, max_distance, temp=temp,
                   stop_when=stop_when, verbose=verbose)
    return
Starts the loop fitting protocol.

Parameters
----------
rounds : int
    The number of Monte Carlo moves to be evaluated.
max_angle : float
    The maximum variation in rotation that can be moved per step.
max_distance : float
    The maximum distance that can be moved per step.
temp : float, optional
    Temperature used during fitting process.
stop_when : float, optional
    Stops fitting when energy is less than or equal to this value.
def same(*values):
    if not values:
        return True
    first, rest = values[0], values[1:]
    return all(value == first for value in rest)
Check if all values in a sequence are equal. Returns True on empty sequences.

Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
def basemz(df):
    d = np.array(df.columns)[df.values.argmax(axis=1)]
    return Trace(d, df.index, name='basemz')
The mz of the most abundant ion.
def sample_within_cc(self, cc_index, nsamples=1):
    polygon = self.geometries[cc_index]['polygon']
    samples = []
    while len(samples) < nsamples:
        point = PointSampler.random_point(polygon.envelope.bounds)
        if PointSampler.contains(polygon, point):
            samples.append(point)
    return samples
Returns randomly sampled points from a polygon. Complexity of this procedure is (A/a * nsamples) where A=area(bbox(P)) and a=area(P) where P is the polygon of the connected component cc_index
def predictions_variance(df, filepath=None):
    df = df.filter(regex="^VAR:")
    by_readout = df.mean(axis=0).reset_index(level=0)
    by_readout.columns = ['Readout', 'Prediction variance (mean)']
    by_readout['Readout'] = by_readout.Readout.map(lambda n: n[4:])

    g1 = sns.factorplot(x='Readout', y='Prediction variance (mean)',
                        data=by_readout, kind='bar', aspect=2)
    for tick in g1.ax.get_xticklabels():
        tick.set_rotation(90)

    if filepath:
        g1.savefig(os.path.join(filepath, 'predictions-variance.pdf'))
    return g1
Plots the mean variance prediction for each readout Parameters ---------- df: `pandas.DataFrame`_ DataFrame with columns starting with `VAR:` filepath: str Absolute path to a folder where to write the plots Returns ------- plot Generated plot .. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
def _get_response_mime_type(self):
    view_name = self.request.view_name
    if view_name != '':
        mime_type = get_registered_mime_type_for_name(view_name)
    else:
        mime_type = None
        acc = None
        for acc in self.request.accept:
            if acc == '*/*':
                mime_type = self.__get_default_response_mime_type()
                break
            try:
                mime_type = \
                    get_registered_mime_type_for_string(acc.lower())
            except KeyError:
                pass
            else:
                break
        if mime_type is None:
            if not acc is None:
                headers = \
                    [('Location', self.request.path_url),
                     ('Content-Type', TextPlainMime.mime_type_string),
                     ]
                mime_strings = get_registered_mime_strings()
                exc = HTTPNotAcceptable('Requested MIME content type(s) '
                                        'not acceptable.',
                                        body=','.join(mime_strings),
                                        headers=headers)
                raise exc
            mime_type = self.__get_default_response_mime_type()
    return mime_type
Returns the reponse MIME type for this view. :raises: :class:`pyramid.httpexceptions.HTTPNotAcceptable` if the MIME content type(s) the client specified can not be handled by the view.
def get_monitoring_problems(self):
    res = {}
    if not self.sched:
        return res

    scheduler_stats = self.sched.get_scheduler_stats(details=True)
    if 'livesynthesis' in scheduler_stats:
        res['livesynthesis'] = scheduler_stats['livesynthesis']
    if 'problems' in scheduler_stats:
        res['problems'] = scheduler_stats['problems']
    return res
Get the current scheduler livesynthesis :return: live synthesis and problems dictionary :rtype: dict
def underline(text):
    text += "\n"
    for i in range(len(text) - 1):
        text += "="
    text += "\n"
    return text
Takes a string and returns it underlined with a row of '=' characters.
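For illustration, a short sketch of what the helper produces (assuming the definition above is in scope):

print(underline("Results"))
# Results
# =======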
def guid2bytes(s):
    assert isinstance(s, str)
    assert len(s) == 36

    p = struct.pack
    return b"".join([
        p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
        p(">H", int(s[19:23], 16)),
        p(">Q", int(s[24:], 16))[2:],
    ])
Converts a GUID to the serialized bytes representation
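A small usage sketch, assuming `import struct` is in scope for the definition above; the GUID value is arbitrary:

packed = guid2bytes("12345678-1234-5678-1234-567812345678")
print(len(packed))  # 16 -- a textual GUID always serializes to 16 bytes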
def get_pending_reboot():
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)

    for check in checks:
        if check():
            return True

    return False
Determine whether there is a reboot pending. .. versionadded:: 2016.11.0 Returns: bool: ``True`` if the system is pending reboot, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' system.get_pending_reboot
def send_reset_password_email(self, user, user_email):
    if not self.user_manager.USER_ENABLE_EMAIL:
        return
    assert self.user_manager.USER_ENABLE_FORGOT_PASSWORD

    email = user_email.email if user_email else user.email

    token = self.user_manager.generate_token(user.id)
    reset_password_link = url_for('user.reset_password', token=token, _external=True)

    self._render_and_send_email(
        email,
        user,
        self.user_manager.USER_RESET_PASSWORD_EMAIL_TEMPLATE,
        reset_password_link=reset_password_link,
    )
Send the 'reset password' email.
def _set_sysfs(self, fcp, target_wwpn, target_lun):
    device = '0.0.%s' % fcp
    port_add = "echo '%s' > " % target_wwpn
    port_add += "/sys/bus/ccw/drivers/zfcp/%s/port_add" % device
    unit_add = "echo '%s' > " % target_lun
    unit_add += "/sys/bus/ccw/drivers/zfcp/%(device)s/%(wwpn)s/unit_add\n"\
                % {'device': device, 'wwpn': target_wwpn}
    return '\n'.join((port_add, unit_add))
rhel6 set WWPN and LUN in sysfs
async def _registration_completed(self, message):
    if not self.registered:
        self.registered = True
        self.connection.throttle = True

        target = message.params[0]
        fakemsg = self._create_message('NICK', target, source=self.nickname)
        await self.on_raw_nick(fakemsg)
We're connected and registered. Receive proper nickname and emit fake NICK message.
def forwards(apps, schema_editor):
    Movie = apps.get_model('spectator_events', 'Movie')
    Work = apps.get_model('spectator_events', 'Work')
    WorkRole = apps.get_model('spectator_events', 'WorkRole')
    WorkSelection = apps.get_model('spectator_events', 'WorkSelection')

    for m in Movie.objects.all():
        work = Work.objects.create(
            kind='movie',
            title=m.title,
            title_sort=m.title_sort,
            year=m.year,
            imdb_id=m.imdb_id
        )

        for role in m.roles.all():
            WorkRole.objects.create(
                creator=role.creator,
                work=work,
                role_name=role.role_name,
                role_order=role.role_order
            )

        for selection in m.events.all():
            WorkSelection.objects.create(
                event=selection.event,
                work=work,
                order=selection.order
            )

        m.delete()
Change all Movie objects into Work objects, and their associated data into WorkRole and WorkSelection models, then delete the Movie.
def parse(self, argument):
    if isinstance(argument, list):
        return argument
    elif not argument:
        return []
    else:
        if self._comma_compat:
            argument = argument.replace(',', ' ')
        return argument.split()
Parses argument as whitespace-separated list of strings. It also parses argument as comma-separated list of strings if requested. Args: argument: string argument passed in the commandline. Returns: [str], the parsed flag value.
def get_availabilities_for_duration(duration, availabilities):
    duration_availabilities = []
    start_time = '10:00'
    while start_time != '17:00':
        if start_time in availabilities:
            if duration == 30:
                duration_availabilities.append(start_time)
            elif increment_time_by_thirty_mins(start_time) in availabilities:
                duration_availabilities.append(start_time)
        start_time = increment_time_by_thirty_mins(start_time)
    return duration_availabilities
Helper function to return the windows of availability of the given duration, when provided a set of 30 minute windows.
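A self-contained sketch of how this helper could be exercised; increment_time_by_thirty_mins is not shown in the source, so the version below is an assumed re-implementation for illustration only:

def increment_time_by_thirty_mins(t):
    # '10:00' -> '10:30', '10:30' -> '11:00'
    hour, minute = map(int, t.split(':'))
    minute += 30
    if minute >= 60:
        hour, minute = hour + 1, minute - 60
    return '{:02d}:{:02d}'.format(hour, minute)

availabilities = ['10:00', '10:30', '16:30']
print(get_availabilities_for_duration(30, availabilities))  # ['10:00', '10:30', '16:30']
print(get_availabilities_for_duration(60, availabilities))  # ['10:00']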
def download_as_zip(name, filename):
    location = list(IPList.objects.filter(name))
    if location:
        iplist = location[0]
        return iplist.download(filename=filename)
Download IPList with zip compression. Recommended for IPLists of larger sizes. This is the default format for downloading IPLists. :param str name: name of IPList :param str filename: name of filename for IPList
def remove_prompt(self):
    with self._cond:
        self._prompt = None
        if self._console_prompt:
            self._console_prompt.Stop()
            self._console_prompt = None
        self.notify_update()
Remove the prompt.
def set_parameter_error(self, name, par, error):
    idx = self.like.par_index(name, par)
    self.like[idx].setError(error)
    self._sync_params(name)
Set the error on the value of a parameter. Parameters ---------- name : str Source name. par : str Parameter name. error : float The value for the parameter error
def get_default_config_help(self):
    config_help = super(UsersCollector, self).get_default_config_help()
    config_help.update({})
    return config_help
Returns the default collector help text
def default_returns_func(symbol, start=None, end=None):
    if start is None:
        start = '1/1/1970'
    if end is None:
        end = _1_bday_ago()

    start = get_utc_timestamp(start)
    end = get_utc_timestamp(end)

    if symbol == 'SPY':
        filepath = data_path('spy.csv')
        rets = get_returns_cached(filepath,
                                  get_symbol_returns_from_yahoo,
                                  end,
                                  symbol='SPY',
                                  start='1/1/1970',
                                  end=datetime.now())
        rets = rets[start:end]
    else:
        rets = get_symbol_returns_from_yahoo(symbol, start=start, end=end)

    return rets[symbol]
Gets returns for a symbol. Queries Yahoo Finance. Attempts to cache SPY. Parameters ---------- symbol : str Ticker symbol, e.g. APPL. start : date, optional Earliest date to fetch data for. Defaults to earliest date available. end : date, optional Latest date to fetch data for. Defaults to latest date available. Returns ------- pd.Series Daily returns for the symbol. - See full explanation in tears.create_full_tear_sheet (returns).
def _send_command_wrapper(self, cmd):
    cached_results = self._results_cache.get(cmd)
    if not cached_results:
        response = self._send_command(cmd)
        self._results_cache[cmd] = response
        return response
    else:
        return cached_results
Send command to the remote device with a caching feature to avoid sending the same command twice based on the SSH_MAPPER_BASE dict cmd key. Parameters ---------- cmd : str The command to send to the remote device after checking cache. Returns ------- response : str The response from the remote device.
def schedule(self, when=None, action=None, **kwargs):
    action = '_publish'
    super(BaseVersionedModel, self).schedule(when=when, action=action, **kwargs)
Schedule this item to be published. :param when: Date/time when this item should go live. None means now.
def search_datasets(
    self,
    license=None,
    format=None,
    query=None,
    featured=None,
    owner=None,
    organization=None,
    badge=None,
    reuses=None,
    page_size=20,
    x_fields=None,
):
    payload = {"badge": badge, "size": page_size, "X-Fields": x_fields}
    search_url = "{}/datasets".format(
        self.base_url,
    )
    search_req = requests.get(
        search_url,
        params=payload,
    )
    logger.debug(search_req.url)
    return search_req.json()
Search datasets within uData portal.
def debit(self, amount, credit_account, description, debit_memo="", credit_memo="", datetime=None):
    assert amount >= 0
    return self.post(amount, credit_account, description,
                     self_memo=debit_memo, other_memo=credit_memo, datetime=datetime)
Post a debit of 'amount' and a credit of -amount against this account and credit_account respectively. note amount must be non-negative.
async def disable_digital_reporting(self, pin):
    port = pin // 8
    command = [PrivateConstants.REPORT_DIGITAL + port,
               PrivateConstants.REPORTING_DISABLE]
    await self._send_command(command)
Disables digital reporting. By turning reporting off for this pin, Reporting is disabled for all 8 bits in the "port" :param pin: Pin and all pins for this port :returns: No return value
def check_type(value: typing.Any, hint: typing.Optional[type]) -> bool:
    if hint is None:
        hint = NoneType
    actual_type = type(value)
    if hint is NoneType:
        correct = value is None
    elif hint is typing.Any:
        correct = True
    elif hint is typing.Pattern or hint is typing.Match:
        correct = isinstance(value, hint.impl_type)
    elif isinstance(hint, typing.TypeVar):
        correct = True
    elif issubclass(hint, typing.Callable):
        actual_type, correct = check_callable(value, hint)
    elif issubclass(hint, typing.Tuple):
        actual_type, correct = check_tuple(value, hint)
    elif issubclass(hint, typing.Union):
        actual_type, correct = check_union(value, hint)
    else:
        correct = isinstance(value, hint)
    return actual_type, correct
Check given ``value``'s type. :param value: given argument :param hint: expected type of given ``value``. as like :mod:`typing` interprets, :const:`None` is interpreted as :class:`types.NoneType` :type hint: :class:`typing.Optional`[:class:`type`]
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
    if not warmup_steps:
        return tf.constant(1.)

    tf.logging.info("Applying %s learning rate warmup for %d steps",
                    warmup_schedule, warmup_steps)

    warmup_steps = tf.to_float(warmup_steps)
    global_step = _global_step(hparams)

    if warmup_schedule == "exp":
        return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
    else:
        assert warmup_schedule == "linear"
        start = tf.constant(0.35)
        return ((tf.constant(1.) - start) / warmup_steps) * global_step + start
Learning rate warmup multiplier.
def _get_es_version(self, config):
    try:
        data = self._get_data(config.url, config, send_sc=False)
        version = data['version']['number'].split('-')[0]
        version = [int(p) for p in version.split('.')[0:3]]
    except AuthenticationError:
        raise
    except Exception as e:
        self.warning("Error while trying to get Elasticsearch version from %s %s" % (config.url, str(e)))
        version = [1, 0, 0]

    self.service_metadata('version', version)
    self.log.debug("Elasticsearch version is %s" % version)
    return version
Get the running version of elasticsearch.
def links(self):
    ret = []
    linkheader = self.getheader('link')
    if not linkheader:
        return ret
    for i in linkheader.split(','):
        try:
            url, params = i.split(';', 1)
        except ValueError:
            url, params = i, ''
        link = {}
        link['url'] = url.strip()
        for param in params.split(';'):
            try:
                k, v = param.split('=')
            except ValueError:
                break
            link[k.strip()] = v.strip(''' '"''')
        ret.append(link)
    return ret
Links parsed from HTTP Link header
def iter_comments(self, number=-1, etag=None):
    url = self._build_url('comments', base_url=self._api)
    return self._iter(int(number), url, ReviewComment, etag=etag)
Iterate over the comments on this pull request. :param int number: (optional), number of comments to return. Default: -1 returns all available comments. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ReviewComment <ReviewComment>`\ s
def createMultipleL4L2Columns(network, networkConfig):
    numCorticalColumns = networkConfig["numCorticalColumns"]
    for i in xrange(numCorticalColumns):
        networkConfigCopy = copy.deepcopy(networkConfig)
        layerConfig = networkConfigCopy["L2Params"]
        layerConfig["seed"] = layerConfig.get("seed", 42) + i
        layerConfig["numOtherCorticalColumns"] = numCorticalColumns - 1

        suffix = "_" + str(i)
        network = createL4L2Column(network, networkConfigCopy, suffix)

    for i in range(networkConfig["numCorticalColumns"]):
        suffixSrc = "_" + str(i)
        for j in range(networkConfig["numCorticalColumns"]):
            if i != j:
                suffixDest = "_" + str(j)
                network.link(
                    "L2Column" + suffixSrc, "L2Column" + suffixDest,
                    "UniformLink", "",
                    srcOutput="feedForwardOutput", destInput="lateralInput",
                    propagationDelay=1)

    enableProfiling(network)
    return network
Create a network consisting of multiple columns. Each column contains one L4 and one L2, and is identical in structure to the network created by createL4L2Column. In addition, all the L2 columns are fully connected to each other through their lateral inputs.

Region names have a column number appended, as in externalInput_0, externalInput_1, etc.

networkConfig must be of the following format (see createL4L2Column for further documentation):

{
  "networkType": "MultipleL4L2Columns",
  "numCorticalColumns": 3,
  "externalInputSize": 1024,
  "sensorInputSize": 1024,
  "L4Params": {
    <constructor parameters for ApicalTMPairRegion>
  },
  "L2Params": {
    <constructor parameters for ColumnPoolerRegion>
  },
  "lateralSPParams": {
    <constructor parameters for optional SPRegion>
  },
  "feedForwardSPParams": {
    <constructor parameters for optional SPRegion>
  }
}
def make_definition(name, base, schema):
    class_name = make_class_name(name)
    cls = register(make(class_name, base, schema))
    globals()[class_name] = cls
Create a new definition.
def predict_array(self, arr):
    precompute = self.precompute
    self.precompute = False
    pred = super().predict_array(arr)
    self.precompute = precompute
    return pred
This over-ride is necessary because otherwise the learner method accesses the wrong model when it is called with precompute set to true Args: arr: a numpy array to be used as input to the model for prediction purposes Returns: a numpy array containing the predictions from the model
def search_definition(self, module, keyword, arg):
    r = module.search_one(keyword, arg)
    if r is not None:
        return r
    for i in module.search('include'):
        modulename = i.arg
        m = self.ctx.search_module(i.pos, modulename)
        if m is not None:
            r = m.search_one(keyword, arg)
            if r is not None:
                return r
    return None
Search for a defintion with `keyword` `name` Search the module and its submodules.
def between(self, minimum: int = 1, maximum: int = 1000) -> int:
    return self.random.randint(minimum, maximum)
Generate a random number between minimum and maximum. :param minimum: Minimum of range. :param maximum: Maximum of range. :return: Number.
def tofile(self, file_):
    close_file = False
    if not hasattr(file_, 'write'):
        file_ = open(file_, 'wb')
        close_file = True
    file_.write(self._f)
    if close_file:
        file_.close()
Dump all storage data to a file. The file_ argument can be a file object or a string that represents a filename. If called with a file object, it should be opened in binary mode, and the caller is responsible for closing the file. The method should only be called after the storage device has been closed to ensure that the locked flag has been set to False.
def get_apphook_field_names(model):
    key = APP_CONFIG_FIELDS_KEY.format(
        app_label=model._meta.app_label, model_name=model._meta.object_name
    ).lower()
    if not hasattr(model, key):
        field_names = _get_apphook_field_names(model)
        setattr(model, key, field_names)
    return getattr(model, key)
Cache app-hook field names on model :param model: model class or object :return: list of foreign key field names to AppHookConfigs
def _argsort_and_resolve_ties(time, random_state):
    n_samples = len(time)
    order = numpy.argsort(time, kind="mergesort")

    i = 0
    while i < n_samples - 1:
        inext = i + 1
        while inext < n_samples and time[order[i]] == time[order[inext]]:
            inext += 1

        if i + 1 != inext:
            random_state.shuffle(order[i:inext])
        i = inext

    return order
Like numpy.argsort, but resolves ties uniformly at random
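A usage sketch, assuming NumPy's RandomState is passed as random_state and the definition above is in scope:

import numpy

time = numpy.array([3.0, 1.0, 3.0, 2.0])
order = _argsort_and_resolve_ties(time, numpy.random.RandomState(0))
print(time[order])  # [1. 2. 3. 3.] -- the two tied 3.0 entries are ordered at random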
def list_leases(self, prefix):
    api_path = '/v1/sys/leases/lookup/{prefix}'.format(prefix=prefix)
    response = self._adapter.list(
        url=api_path,
    )
    return response.json()
Retrieve a list of lease ids. Supported methods: LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json :param prefix: Lease prefix to filter list by. :type prefix: str | unicode :return: The JSON response of the request. :rtype: dict
def users_create(self, email, name, password, username, **kwargs):
    return self.__call_api_post('users.create', email=email, name=name,
                                password=password, username=username, kwargs=kwargs)
Creates a user
def run(self, forever=True):
    loop = self.create_connection()
    self.add_signal_handlers()
    if forever:
        loop.run_forever()
start the bot
def continuous(self, *args):
    new_df = copy_df(self)
    fields = _render_field_set(args)
    self._assert_ml_fields_valid(*fields)
    new_df._perform_operation(op.FieldContinuityOperation(
        dict((_get_field_name(f), True) for f in fields)))
    return new_df
Set fields to be continuous. :rtype: DataFrame :Example: >>> # Table schema is create table test(f1 double, f2 string) >>> # Original continuity: f1=DISCRETE, f2=DISCRETE >>> # Now we want to set ``f1`` and ``f2`` into continuous >>> new_ds = df.continuous('f1 f2')
def __within2(value, within=None, errmsg=None, dtype=None):
    valid, _value = False, value
    if dtype:
        try:
            _value = dtype(value)
            valid = _value in within
        except ValueError:
            pass
    else:
        valid = _value in within

    if errmsg is None:
        if dtype:
            typename = getattr(dtype, '__name__',
                               hasattr(dtype, '__class__') and
                               getattr(dtype.__class__, 'name', dtype))
            errmsg = '{0} within \'{1}\''.format(typename, within)
        else:
            errmsg = 'within \'{0}\''.format(within)

    return (valid, _value, errmsg)
validate that a value is in ``within`` and optionally a ``dtype``
def write(self, data):
    if isinstance(data, bytearray):
        data = bytes(data)

    if not isinstance(data, byte_types):
        raise ValueError("A bytes argument is required")

    res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))

    if res < 0:
        raise IOError("Failed to write data")

    return res
Writes data to the stream. :param data: bytes, FLV data to write to the stream The data passed can contain multiple FLV tags, but it MUST always contain complete tags or undefined behaviour might occur. Raises :exc:`IOError` on error.
def snapshots_to_send(source_snaps, dest_snaps):
    if len(source_snaps) == 0:
        raise AssertionError("No snapshots exist locally!")
    if len(dest_snaps) == 0:
        return None, source_snaps[-1]
    last_remote = dest_snaps[-1]
    for snap in reversed(source_snaps):
        if snap == last_remote:
            return last_remote, source_snaps[-1]
    raise AssertionError("Latest snapshot on destination doesn't exist on source!")
return pair of snapshots
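A short sketch of the incremental-send decision this helper encodes; the snapshot names are hypothetical:

source = ['snap-1', 'snap-2', 'snap-3']
dest = ['snap-1', 'snap-2']
print(snapshots_to_send(source, dest))  # ('snap-2', 'snap-3'): incremental send from snap-2 up to snap-3
print(snapshots_to_send(source, []))    # (None, 'snap-3'): no common base, send the latest snapshot in full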
def grok_state(self, obj):
    if 'state' in obj:
        my_state = obj['state'].lower()
        if my_state != 'absent' and my_state != 'present':
            raise aomi_excep.Validation(
                'state must be either "absent" or "present"')

    self.present = obj.get('state', 'present').lower() == 'present'
Determine the desired state of this resource based on data present
def is_nested_list_like(obj):
    return (is_list_like(obj) and hasattr(obj, '__len__') and
            len(obj) > 0 and all(is_list_like(item) for item in obj))
Check if the object is list-like, and that all of its elements are also list-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like
def run(self, *args):
    params = self.parser.parse_args(args)
    code = self.initialize(name=params.name, reuse=params.reuse)
    return code
Initialize a registry. Create and initialize an empty registry which its name is defined by <name> parameter. Required tables will be also created.
def update(self, message=None, subject=None, days=None,
           downloads=None, notify=None):
    method, url = get_URL('update')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'transferid': self.transfer_id,
    }

    data = {
        'message': message or self.transfer_info.get('message'),
        'subject': subject or self.transfer_info.get('subject'),
        'days': days or self.transfer_info.get('days'),
        'downloads': downloads or self.transfer_info.get('downloads'),
        'notify': notify or self.transfer_info.get('notify')
    }

    payload.update(data)

    res = getattr(self.session, method)(url, params=payload)

    if res.status_code:
        self.transfer_info.update(data)
        return True

    hellraiser(res)
Update properties for a transfer.

:param message: updated message to recipient(s)
:param subject: updated subject for transfer
:param days: updated amount of days transfer is available
:param downloads: updated amount of downloads allowed for transfer
:param notify: update whether to notify on downloads or not
:type message: ``str`` or ``unicode``
:type subject: ``str`` or ``unicode``
:type days: ``int``
:type downloads: ``int``
:type notify: ``bool``
:rtype: ``bool``
def plot_all(self, show=True, **kwargs):
    figs = []
    app = figs.append
    app(self.plot_stacked_hist(show=show))
    app(self.plot_efficiency(show=show))
    app(self.plot_pie(show=show))
    return figs
Call all plot methods provided by the parser.
def get_consensus_tree(self, cutoff=0.0, best_tree=None):
    if best_tree:
        raise NotImplementedError("best_tree option not yet supported.")
    cons = ConsensusTree(self.treelist, cutoff)
    cons.update()
    return cons.ttree
Returns an extended majority rule consensus tree as a Toytree object. Node labels include 'support' values showing the occurrence of clades in the consensus tree across trees in the input treelist. Clades with support below 'cutoff' are collapsed into polytomies. If you enter an optional 'best_tree' then support values from the treelist calculated for clades in this tree, and the best_tree is returned with support values added to nodes. Params ------ cutoff (float; default=0.0): Cutoff below which clades are collapsed in the majority rule consensus tree. This is a proportion (e.g., 0.5 means 50%). best_tree (Toytree; optional): A tree that support values should be calculated for and added to. For example, you want to calculate how often clades in your best ML tree are supported in 100 bootstrap trees.
def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario):
    self.scenarios[scenario_name] = parameter
Add a scenario for this parameter. :param scenario_name: :param parameter: :return:
def dumps(self):
    io = six.StringIO()
    self.dump(io)
    io.seek(0)
    return io.read()
Dump data to a string. :rtype: str
def from_pkg(self):
    if self._version is None:
        frame = caller(1)
        pkg = frame.f_globals.get('__package__')
        if pkg is not None:
            self._version = pkg_version(pkg)
    return self
Use pkg_resources to determine the installed package version.
def components(self) -> List['DAGCircuit']:
    comps = nx.weakly_connected_component_subgraphs(self.graph)
    return [DAGCircuit(comp) for comp in comps]
Split DAGCircuit into independent components
def wp_draw_callback(self, points):
    if len(points) < 3:
        return
    from MAVProxy.modules.lib import mp_util
    home = self.wploader.wp(0)
    self.wploader.clear()
    self.wploader.target_system = self.target_system
    self.wploader.target_component = self.target_component
    self.wploader.add(home)
    if self.get_default_frame() == mavutil.mavlink.MAV_FRAME_GLOBAL_TERRAIN_ALT:
        use_terrain = True
    else:
        use_terrain = False
    for p in points:
        self.wploader.add_latlonalt(p[0], p[1], self.settings.wpalt, terrain_alt=use_terrain)
    self.send_all_waypoints()
callback from drawing waypoints
def add_model(self, *args, **kwargs):
    if self.category != Category.MODEL:
        raise APIError("Part should be of category MODEL")
    return self._client.create_model(self, *args, **kwargs)
Add a new child model to this model. In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as additional keyword=value argument to this method. This will improve performance of the backend against a trade-off that someone looking at the frontend won't notice any changes unless the page is refreshed. :return: a :class:`Part` of category `MODEL`
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
    if not self._is_running:
        raise SublemonRuntimeError(
            'Attempted to spawn subprocesses from a non-started server')
    subprocs = [SublemonSubprocess(self, cmd) for cmd in cmds]
    for sp in subprocs:
        asyncio.ensure_future(sp.spawn())
    return subprocs
Coroutine to spawn shell commands. If `max_concurrency` is reached during the attempt to spawn the specified subprocesses, excess subprocesses will block while attempting to acquire this server's semaphore.
def _convert_many_to_one(self, col_name, label, description,
                         lst_validators, filter_rel_fields, form_props):
    query_func = self._get_related_query_func(col_name, filter_rel_fields)
    get_pk_func = self._get_related_pk_func(col_name)
    extra_classes = None
    allow_blank = True
    if not self.datamodel.is_nullable(col_name):
        lst_validators.append(validators.DataRequired())
        allow_blank = False
    else:
        lst_validators.append(validators.Optional())
    form_props[col_name] = \
        QuerySelectField(label,
                         description=description,
                         query_func=query_func,
                         get_pk_func=get_pk_func,
                         allow_blank=allow_blank,
                         validators=lst_validators,
                         widget=Select2Widget(extra_classes=extra_classes))
    return form_props
Creates a WTForm field for many to one related fields, will use a Select box based on a query. Will only work with SQLAlchemy interface.
def get_archive_cmdlist_func(program, command, format):
    key = util.stripext(os.path.basename(program).lower())
    modulename = ".programs." + ProgramModules.get(key, key)
    try:
        module = importlib.import_module(modulename, __name__)
    except ImportError as msg:
        raise util.PatoolError(msg)
    try:
        return getattr(module, '%s_%s' % (command, format))
    except AttributeError as msg:
        raise util.PatoolError(msg)
Get the Python function that executes the given program.
def get_config(basedir, files):
    config_details = config.find(
        basedir, files,
        environment.Environment.from_env_file(basedir))
    return config.load(config_details)
Returns the config object for the selected docker-compose.yml This is an instance of `compose.config.config.Config`.
def reqTickers(
        self, *contracts: List[Contract],
        regulatorySnapshot: bool = False) -> List[Ticker]:
    return self._run(
        self.reqTickersAsync(
            *contracts, regulatorySnapshot=regulatorySnapshot))
Request and return a list of snapshot tickers. The list is returned when all tickers are ready. This method is blocking. Args: contracts: Contracts to get tickers for. regulatorySnapshot: Request NBBO snapshots (may incur a fee).
def calc_support(self, items):
    if not items:
        return 1.0
    if not self.num_transaction:
        return 0.0

    sum_indexes = None
    for item in items:
        indexes = self.__transaction_index_map.get(item)
        if indexes is None:
            return 0.0
        if sum_indexes is None:
            sum_indexes = indexes
        else:
            sum_indexes = sum_indexes.intersection(indexes)

    return float(len(sum_indexes)) / self.__num_transaction
Returns a support for items. Arguments: items -- Items as an iterable object (eg. ['A', 'B']).
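The support computation reduces to intersecting per-item transaction index sets; a standalone sketch of that idea with hypothetical transactions (not the class's actual internals):

transactions = [['A', 'B'], ['A', 'C'], ['A', 'B', 'C']]
index_map = {}
for idx, transaction in enumerate(transactions):
    for item in transaction:
        index_map.setdefault(item, set()).add(idx)

common = index_map['A'] & index_map['B']   # transactions containing both A and B
print(len(common) / len(transactions))     # 0.666... -- support of itemset {A, B}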
def makeService(self, options):
    return NodeService(
        port=options['port'],
        host=options['host'],
        broker_host=options['broker_host'],
        broker_port=options['broker_port'],
        debug=options['debug']
    )
Construct a Node Server
def _skw_matches_comparator(kw0, kw1):
    def compare(a, b):
        return (a > b) - (a < b)

    list_comparison = compare(len(kw1[1][0]), len(kw0[1][0]))
    if list_comparison:
        return list_comparison

    if kw0[0].isComposite() and kw1[0].isComposite():
        component_avg0 = sum(kw0[1][1]) / len(kw0[1][1])
        component_avg1 = sum(kw1[1][1]) / len(kw1[1][1])
        component_comparison = compare(component_avg1, component_avg0)
        if component_comparison:
            return component_comparison

    return compare(len(str(kw1[0])), len(str(kw0[0])))
Compare 2 single keyword objects. First by the number of their spans (i.e. how many times they were found); if that is equal, it compares them by the lengths of their labels.
def suppress_stdout():
    save_stdout = sys.stdout
    sys.stdout = DevNull()
    yield
    sys.stdout = save_stdout
Context manager that suppresses stdout.

Examples:
    >>> with suppress_stdout():
    ...     print('Test print')
    >>> print('test')
    test
def describe(self, resource=None):
    if resource is None:
        simple_descriptor = copy.deepcopy(self._datapackage.descriptor)
        for resource in simple_descriptor['resources']:
            resource.pop('schema', None)
        return simple_descriptor
    else:
        return self.__resources[resource].descriptor
Describe dataset or resource within dataset :param resource: The name of a specific resource (i.e. file or table) contained in the dataset. If ``resource`` is None, this method will describe the dataset itself. (Default value = None) :type resource: str, optional :returns: The descriptor of the dataset or of a specific resource, if ``resource`` is specified in the call. :rtype: dict
def libvlc_video_set_teletext(p_mi, i_page):
    f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
        _Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_page)
Set new teletext page to retrieve. @param p_mi: the media player. @param i_page: teletex page number requested.
def _remove_non_methods():
    cur_module = sys.modules[__name__]
    my_globals = dict(globals())
    from prettytensor.pretty_tensor_class import PrettyTensor
    for name, _ in six.iteritems(my_globals):
        if not hasattr(PrettyTensor, name):
            delattr(cur_module, name)
    if hasattr(cur_module, 'bookkeeper'):
        delattr(cur_module, 'bookkeeper')
Removes any object in dict that is not a registered method.
def validate_schema(yaml_def, branch=False):
    schema = Schema({
        'lane' if not branch else 'branch': {
            Optional('name'): str,
            Optional('run_parallel'): bool,
            'tasks': list
        }
    })
    schema.validate(yaml_def)

    from schema import And, Use
    task_schema = Schema({
        'class': str,
        Optional('kwargs'): Or({str: object}),
        Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
    })

    def validate_tasks(tasks):
        for task in tasks:
            try:
                Schema({'branch': dict}).validate(task)
                validate_schema(task, True)
            except SchemaError:
                task_schema.validate(task)
        return True

    return validate_tasks(yaml_def['lane']['tasks'] if not branch else yaml_def['branch']['tasks'])
Validates the schema of a dict Parameters ---------- yaml_def : dict dict whose schema shall be validated branch : bool Indicates whether `yaml_def` is a dict of a top-level lane, or of a branch inside a lane (needed for recursion) Returns ------- bool True if validation was successful
def state(ctx):
    dev = ctx.obj
    click.echo(dev)
    ctx.forward(locked)
    ctx.forward(low_battery)
    ctx.forward(window_open)
    ctx.forward(boost)
    ctx.forward(temp)
    ctx.forward(mode)
    ctx.forward(valve_state)
Prints out all available information.