def handle_put_user(self, req):
"""Handles the PUT v2/<account>/<user> call for adding a user to an
account.
X-Auth-User-Key represents the user's key (url encoded),
- OR -
X-Auth-User-Key-Hash represents the user's hashed key (url encoded),
X-Auth-User-Admin may be set to `true` to create an account .admin, and
X-Auth-User-Reseller-Admin may be set to `true` to create a
.reseller_admin.
Creating users
**************
Can only be called by an account .admin unless the user is to be a
.reseller_admin, in which case the request must be by .super_admin.
Changing password/key
*********************
1) A reseller_admin key can be changed by the super_admin or by that user
themselves.
2) An admin key can be changed by any admin in the same account, by a
reseller_admin, by the super_admin, or by that user themselves.
3) A regular user key can be changed by any admin in their account, by a
reseller_admin, by the super_admin, or by that user themselves.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success.
"""
# Validate path info
account = req.path_info_pop()
user = req.path_info_pop()
key = unquote(req.headers.get('x-auth-user-key', ''))
key_hash = unquote(req.headers.get('x-auth-user-key-hash', ''))
admin = req.headers.get('x-auth-user-admin') == 'true'
reseller_admin = \
req.headers.get('x-auth-user-reseller-admin') == 'true'
if reseller_admin:
admin = True
if req.path_info or not account or account[0] == '.' or not user or \
user[0] == '.' or (not key and not key_hash):
return HTTPBadRequest(request=req)
if key_hash:
try:
swauth.authtypes.validate_creds(key_hash)
except ValueError:
return HTTPBadRequest(request=req)
user_arg = account + ':' + user
if reseller_admin:
if not self.is_super_admin(req) and\
not self.is_user_changing_own_key(req, user_arg):
return self.denied_response(req)
elif not self.is_account_admin(req, account) and\
not self.is_user_changing_own_key(req, user_arg):
return self.denied_response(req)
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp = self.make_pre_authed_request(
req.environ, 'HEAD', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not retrieve account id value: %s %s' %
(path, resp.status))
headers = {'X-Object-Meta-Account-Id':
resp.headers['x-container-meta-account-id']}
# Create the object in the main auth account (this object represents
# the user)
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
groups = ['%s:%s' % (account, user), account]
if admin:
groups.append('.admin')
if reseller_admin:
groups.append('.reseller_admin')
auth_value = key_hash or self.auth_encoder().encode(key)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path,
json.dumps({'auth': auth_value,
'groups': [{'name': g} for g in groups]}),
headers=headers).get_response(self.app)
if resp.status_int == 404:
return HTTPNotFound(request=req)
if resp.status_int // 100 != 2:
raise Exception('Could not create user object: %s %s' %
(path, resp.status))
return HTTPCreated(request=req)
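A hedged client-side sketch of calling this endpoint. The URL, the admin credentials, and the account/user names below are assumptions for illustration; the X-Auth-User-* headers follow the docstring above.
import requests

AUTH_URL = 'http://swift.example.com:8080/auth/v2'  # hypothetical swauth endpoint
resp = requests.put(
    '%s/myaccount/newuser' % AUTH_URL,
    headers={
        'X-Auth-Admin-User': '.super_admin',  # assumed admin credentials
        'X-Auth-Admin-Key': 'swauthkey',
        'X-Auth-User-Key': 'secret',          # or X-Auth-User-Key-Hash instead
        'X-Auth-User-Admin': 'true',          # create an account .admin
    })
assert resp.status_code // 100 == 2  # 201 Created on success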
def asr_breaking(self, tol_eigendisplacements=1e-5):
"""
Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: the bands whose eigendisplacements correspond to a rigid translation
within tol_eigendisplacements are selected. If these are not identified, or
eigendisplacements are missing, the first 3 modes will be used
(indices [0:3]).
"""
for i in range(self.nb_qpoints):
if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)):
if self.has_eigendisplacements:
acoustic_modes_index = []
for j in range(self.nb_bands):
eig = self.eigendisplacements[j][i]
if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements:
acoustic_modes_index.append(j)
# if the acoustic modes are not correctly identified, fall back to
# the first three modes
if len(acoustic_modes_index) != 3:
acoustic_modes_index = [0, 1, 2]
return self.bands[acoustic_modes_index, i]
else:
return self.bands[:3, i]
return None
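A small standalone sketch of the translation test used above, with made-up eigendisplacements: a mode whose per-atom displacement vectors are all (nearly) identical is treated as acoustic.
import numpy as np

tol = 1e-5
# shape (n_atoms, 3): one displacement vector per atom (made-up values)
eig_acoustic = np.array([[0.5, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.0, 0.0]])
eig_optical = np.array([[0.5, 0.0, 0.0], [-0.5, 0.0, 0.0], [0.0, 0.0, 0.0]])

def looks_like_translation(eig, tol):
    # every atom moves like the first atom -> rigid translation
    return np.max(np.abs(eig[1:] - eig[:1])) < tol

print(looks_like_translation(eig_acoustic, tol))  # True
print(looks_like_translation(eig_optical, tol))   # False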
def rename(self, old_fieldname, new_fieldname):
"""
Renames a specific field, and preserves the underlying order.
"""
if old_fieldname not in self:
raise Exception("DataTable does not have field `%s`" %
old_fieldname)
if not isinstance(new_fieldname, basestring):
raise ValueError("DataTable fields must be strings, not `%s`" %
type(new_fieldname))
if old_fieldname == new_fieldname:
return
new_names = self.fields
location = new_names.index(old_fieldname)
del new_names[location]
new_names.insert(location, new_fieldname)
self.fields = new_names
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def price_diff(self):
'Return the first-order difference of DataStruct.price'
res = self.price.groupby(level=1).apply(lambda x: x.diff(1))
res.name = 'price_diff'
return res
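For illustration, the same per-group first difference on a toy two-level index (made-up prices); `groupby(level=1).diff(1)` is the equivalent shortcut for the apply/diff idiom above.
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('2019-01-01', 'AAPL'), ('2019-01-02', 'AAPL'),
     ('2019-01-01', 'MSFT'), ('2019-01-02', 'MSFT')],
    names=['date', 'code'])
price = pd.Series([10.0, 10.5, 100.0, 99.0], index=idx, name='price')

price_diff = price.groupby(level=1).diff(1)
price_diff.name = 'price_diff'
print(price_diff)  # NaN for the first row of each code, then 0.5 and -1.0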
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def download(url, filename=None, print_progress=0, delete_fail=True,
**kwargs):
"""
Download a file, optionally printing a simple progress bar
url: The URL to download
filename: The filename to save to, default is to use the URL basename
print_progress: The length of the progress bar, use 0 to disable
delete_fail: If True (the default), delete the partially downloaded file
if the download was not successful; if False, keep the temporary file
return: The downloaded filename
"""
blocksize = 1024 * 1024
downloaded = 0
progress = None
log.info('Downloading %s', url)
response = open_url(url, **kwargs)
if not filename:
filename = os.path.basename(url)
output = None
try:
total = int(response.headers['Content-Length'])
if print_progress:
progress = ProgressBar(print_progress, total)
with tempfile.NamedTemporaryFile(
prefix=filename + '.', dir='.', delete=False) as output:
while downloaded < total:
block = response.read(blocksize)
output.write(block)
downloaded += len(block)
if progress:
progress.update(downloaded)
os.rename(output.name, filename)
output = None
return filename
finally:
response.close()
if delete_fail and output:
os.unlink(output.name)
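A hedged usage sketch of the helper above; the URL is purely illustrative, and `open_url` and `ProgressBar` are assumed to be defined elsewhere in the same module.
fname = download('https://example.com/archive/data.tar.gz',  # hypothetical URL
                 filename='data.tar.gz',
                 print_progress=40,   # 40-character progress bar
                 delete_fail=True)
print('saved to', fname)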
def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
"""
Decrypt a stream of data using key info
for a private key we own.
@my_key_info and @sender_key_info should be data returned by gpg_app_get_key
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'status': True} on success
Return {'error': ...} on error
"""
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
try:
my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
except:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}
res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load private key'}
# do the decryption
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'decryption ok':
log.debug("decrypt_file: %s" % res.__dict__)
return {'error': 'Failed to decrypt data'}
log.debug("decryption succeeded from keys in %s" % config_dir)
return {'status': True}
def on_connect(client):
"""
Sample on_connect function.
Handles new connections.
"""
print "++ Opened connection to %s" % client.addrport()
broadcast('%s joins the conversation.\n' % client.addrport() )
CLIENT_LIST.append(client)
client.send("Welcome to the Chat Server, %s.\n" % client.addrport() ) | Sample on_connect function.
Handles new connections. |
def shrank(self, block=None, percent_diff=0, abs_diff=1):
"""
Returns whether a block has fewer nets than before
:param Block block: block to check (if changed)
:param Number percent_diff: percentage difference threshold
:param int abs_diff: absolute difference threshold
:return: boolean
This function checks whether the change in the number of
nets is greater than the percentage and absolute difference
thresholds.
"""
if block is None:
block = self.block
cur_nets = len(block.logic)
net_goal = self.prev_nets * (1 - percent_diff) - abs_diff
less_nets = (cur_nets <= net_goal)
self.prev_nets = cur_nets
return less_nets
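A worked instance of the threshold formula above, with made-up numbers: for prev_nets=1000, percent_diff=0.05 and abs_diff=1 the goal is 1000 * (1 - 0.05) - 1 = 949, so only 949 nets or fewer counts as shrinkage.
prev_nets = 1000
percent_diff = 0.05
abs_diff = 1
net_goal = prev_nets * (1 - percent_diff) - abs_diff  # 949.0
print(950 <= net_goal)  # False: did not shrink enough
print(949 <= net_goal)  # True: shrank past both thresholds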
def dictToFile(dictionary,replicateKey,outFileName):
'''
Function to write dictionary data, from subsampleReplicates, to an hdf5 file.
:param dictionary: nested dictionary returned by subsampleReplicates
:param replicateKey: string designating the replicate written to file
:param outFileName: string defining the hdf5 filename
'''
replicateToFile=h5py.File(outFileName,"w")
for i in range(len(dictionary[replicateKey])):
replicateToFile.create_dataset("{}".format(dictionary[replicateKey].keys()[i])\
,data=dictionary[replicateKey].values()[i]\
,compression="gzip")
replicateToFile.close()
def delete_model(self, meta: dict):
"""Delete the model from GCS."""
bucket = self.connect()
if bucket is None:
raise BackendRequiredError
blob_name = "models/%s/%s.asdf" % (meta["model"], meta["uuid"])
self._log.info(blob_name)
try:
self._log.info("Deleting model ...")
bucket.delete_blob(blob_name)
except NotFound:
self._log.warning("Model %s already deleted.", meta["uuid"]) | Delete the model from GCS. |
def diffmap(adata, n_comps=15, copy=False):
"""Diffusion Maps [Coifman05]_ [Haghverdi15]_ [Wolf18]_.
Diffusion maps [Coifman05]_ has been proposed for visualizing single-cell
data by [Haghverdi15]_. The tool uses the adapted Gaussian kernel suggested
by [Haghverdi16]_ in the implementation of [Wolf18]_.
The width ("sigma") of the connectivity kernel is implicitly determined by
the number of neighbors used to compute the single-cell graph in
:func:`~scanpy.api.neighbors`. To reproduce the original implementation
using a Gaussian kernel, use `method=='gauss'` in
:func:`~scanpy.api.neighbors`. To use an exponential kernel, use the default
`method=='umap'`. Differences between these options shouldn't usually be
dramatic.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_comps : `int`, optional (default: 15)
The number of dimensions of the representation.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_diffmap** : :class:`numpy.ndarray` (`adata.obsm`)
Diffusion map representation of data, which is the right eigen basis of
the transition matrix with eigenvectors as columns.
**diffmap_evals** : :class:`numpy.ndarray` (`adata.uns`)
Array of size (number of eigen vectors). Eigenvalues of transition matrix.
"""
if 'neighbors' not in adata.uns:
raise ValueError(
'You need to run `pp.neighbors` first to compute a neighborhood graph.')
if n_comps <= 2:
raise ValueError(
'Provide any value greater than 2 for `n_comps`. ')
adata = adata.copy() if copy else adata
_diffmap(adata, n_comps=n_comps)
return adata if copy else None
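A minimal usage sketch, assuming scanpy is importable as `sc` and its bundled `pbmc68k_reduced` example dataset is available; `pp.neighbors` must run first, as the check above enforces.
import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()   # small bundled example AnnData
sc.pp.neighbors(adata, n_neighbors=15)  # required before diffmap
sc.tl.diffmap(adata, n_comps=15)
print(adata.obsm['X_diffmap'].shape)    # (n_obs, 15)
print(adata.uns['diffmap_evals'][:5])   # leading eigenvalues of the transition matrix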
def _wait_until(obj, att, desired, callback, interval, attempts, verbose,
verbose_atts):
"""
Loops until either the desired value of the attribute is reached, or the
number of attempts is exceeded.
"""
if not isinstance(desired, (list, tuple)):
desired = [desired]
if verbose_atts is None:
verbose_atts = []
if not isinstance(verbose_atts, (list, tuple)):
verbose_atts = [verbose_atts]
infinite = (attempts == 0)
attempt = 0
start = time.time()
while infinite or (attempt < attempts):
try:
# For servers:
obj.get()
except AttributeError:
try:
# For other objects that don't support .get()
obj = obj.manager.get(obj.id)
except AttributeError:
# punt
raise exc.NoReloadError("The 'wait_until' method is not "
"supported for '%s' objects." % obj.__class__)
attval = getattr(obj, att)
if verbose:
elapsed = time.time() - start
msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (
att, attval, elapsed)]
for vatt in verbose_atts:
vattval = getattr(obj, vatt, None)
msgs.append("%s=%s" % (vatt, vattval))
print(" ".join(msgs))
if attval in desired:
return obj
time.sleep(interval)
attempt += 1
return obj
def get_user_for_membersuite_entity(membersuite_entity):
"""Returns a User for `membersuite_entity`.
membersuite_entity is any MemberSuite object that has the fields
membersuite_id, email_address, first_name, and last_name, e.g.,
PortalUser or Individual.
"""
user = None
user_created = False
# First, try to match on username.
user_username = generate_username(membersuite_entity)
try:
user = User.objects.get(username=user_username)
except User.DoesNotExist:
pass
# Next, try to match on email address.
if not user:
try:
user = User.objects.filter(
email=membersuite_entity.email_address)[0]
except IndexError:
pass
# No match? Create one.
if not user:
user = User.objects.create(
username=user_username,
email=membersuite_entity.email_address,
first_name=membersuite_entity.first_name,
last_name=membersuite_entity.last_name)
user_created = True
return user, user_created
def _raise_error_if_not_of_type(arg, expected_type, arg_name=None):
"""
Check if the input is of expected type.
Parameters
----------
arg : Input argument.
expected_type : A type OR a list of types that the argument is expected
to be.
arg_name : The name of the variable in the function being used. No
name is assumed if set to None.
Examples
--------
_raise_error_if_not_of_type(sf, str, 'sf')
_raise_error_if_not_of_type(sf, [str, int], 'sf')
"""
display_name = "%s " % arg_name if arg_name is not None else "Argument "
lst_expected_type = [expected_type] if \
type(expected_type) == type else expected_type
err_msg = "%smust be of type %s " % (display_name,
' or '.join([x.__name__ for x in lst_expected_type]))
err_msg += "(not %s)." % type(arg).__name__
if not any(map(lambda x: isinstance(arg, x), lst_expected_type)):
raise TypeError(err_msg)
def _add_new_items(self, config, seen):
'''Add new (unseen) items to the config.'''
for (key, value) in self.items():
if key not in seen:
self._set_value(config, key, value)
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
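The core pandas idiom used above, shown standalone on a made-up timezone-aware series: convert to the target zone, then strip the tz info.
import pandas as pd

s = pd.Series(pd.date_range('2019-01-01 12:00', periods=2, freq='H', tz='UTC'))
naive = s.dt.tz_convert('America/Los_Angeles').dt.tz_localize(None)
print(naive)  # wall-clock times in Los Angeles, with the timezone removed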
def getStats(self):
"""
TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields).
"""
# The record store returns a dict of stats, each value in this dict is
# a list with one item per field of the record store
# {
# 'min' : [f1_min, f2_min, f3_min],
# 'max' : [f1_max, f2_max, f3_max]
# }
recordStoreStats = self._recordStore.getStats()
# We need to convert each item to represent the fields of the *stream*
streamStats = dict()
for (key, values) in recordStoreStats.items():
fieldStats = dict(zip(self._recordStoreFieldNames, values))
streamValues = []
for name in self._streamFieldNames:
streamValues.append(fieldStats[name])
streamStats[key] = streamValues
return streamStats
def _process_state_embryo(self, job_record):
""" method that takes care of processing job records in STATE_EMBRYO state"""
start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
end_timeperiod, job_record)
def channels(self):
"""Output channels"""
try:
return self._channels
except AttributeError:
logger.debug("initialize output channels ...")
channels = self.args.channels
config_channels = [sec.rpartition('_')[0] for sec in self.config.sections(suffix='_channel')]
unknown = set(channels) - set(config_channels)
if unknown:
raise ValueError("undefined channel %r" % list(unknown))
output_channels = []
for channel in set(channels):
channel_type = self.config.get('%s_channel' % channel, 'type')
if channel_type == 'tty':
output_channels.append(TermChannel(channel, self.args, self.config))
elif channel_type == 'file':
output_channels.append(FileChannel(channel, self.args, self.config))
elif channel_type == 'mail':
output_channels.append(MailChannel(channel, self.args, self.config))
else:
raise LogRaptorConfigError('unknown channel type %r' % channel_type)
return output_channels
def _set_auth_type(self, v, load=False):
"""
Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_type() directly.
YANG Description: Authentication type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """auth_type must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""",
})
self.__auth_type = t
if hasattr(self, '_set'):
self._set()
def _parallel_exec(self, hosts):
''' handles multiprocessing when more than 1 fork is required '''
if not hosts:
return
p = multiprocessing.Pool(self.forks)
results = []
#results = p.map(multiprocessing_runner, hosts) # can't handle keyboard interrupt
results = p.map_async(multiprocessing_runner, hosts).get(9999999)
p.close()
p.join()
return results
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
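The pairing idiom above on a made-up GNU.sparse.map value: the comma-separated list alternates offset and size, and zipping the even/odd slices rebuilds (offset, size) tuples.
pax_value = "0,512,10240,512"  # made-up map: offset,size,offset,size
sparse = [int(x) for x in pax_value.split(",")]
print(list(zip(sparse[::2], sparse[1::2])))  # [(0, 512), (10240, 512)]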
def pin_ls(self, type="all", **kwargs):
"""Lists objects pinned to local storage.
By default, all pinned objects are returned, but the ``type`` flag or
arguments can restrict that to a specific pin type or to some specific
objects respectively.
.. code-block:: python
>>> c.pin_ls()
{'Keys': {
'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'},
'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'},
'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'},
…
'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'}}}
Parameters
----------
type : "str"
The type of pinned keys to list. Can be:
* ``"direct"``
* ``"indirect"``
* ``"recursive"``
* ``"all"``
Returns
-------
dict : Hashes of pinned IPFS objects and why they are pinned
"""
kwargs.setdefault("opts", {"type": type})
return self._client.request('/pin/ls', decoder='json', **kwargs)
def configure_logging(args):
"""Logging to console"""
log_format = logging.Formatter('%(levelname)s:%(name)s:line %(lineno)s:%(message)s')
log_level = logging.INFO if args.verbose else logging.WARN
log_level = logging.DEBUG if args.debug else log_level
console = logging.StreamHandler()
console.setFormatter(log_format)
console.setLevel(log_level)
root_logger = logging.getLogger()
if len(root_logger.handlers) == 0:
root_logger.addHandler(console)
root_logger.setLevel(log_level)
root_logger.handlers[0].setFormatter(log_format)
logging.getLogger(__name__)
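A hedged usage sketch; `args` here is a stand-in namespace carrying the two flags the helper reads.
import argparse
import logging

args = argparse.Namespace(verbose=True, debug=False)  # stand-in for parsed CLI args
configure_logging(args)
logging.getLogger('demo').info('visible at INFO because verbose=True')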
def copy(self):
"Return a clone of this hash object."
other = _ChainedHashAlgorithm(self._algorithms)
other._hobj = deepcopy(self._hobj)
other._fobj = deepcopy(self._fobj)
return other
def token(self):
" Get token when needed."
if hasattr(self, '_token'):
return getattr(self, '_token')
# Json formatted auth.
data = json.dumps({'customer_name': self.customer,
'user_name': self.username,
'password': self.password})
# Start session.
response = requests.post(
'https://api2.dynect.net/REST/Session/', data=data,
headers={'Content-Type': 'application/json'})
# convert to data.
content = json.loads(response.content)
if response.status_code != 200:
# Check for errors.
if self.check_error(content, 'failure', 'INVALID_DATA'):
raise self.CredentialsError(
self.response_message(content, 'ERROR'))
raise self.Failure(self.response_message(content, 'ERROR'),
'Unhandled failure')
# Extract token from content
if 'data' in content and 'token' in content['data']:
token = content['data']['token']
else:
raise self.AuthenticationError(response)
setattr(self, '_token', token)
return token
def get_grade_systems_by_genus_type(self, grade_system_genus_type):
"""Gets a ``GradeSystemList`` corresponding to the given grade system genus ``Type`` which does not include systems of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known systems or
an error results. Otherwise, the returned list may contain only
those systems that are accessible through this session.
arg: grade_system_genus_type (osid.type.Type): a grade system
genus type
return: (osid.grading.GradeSystemList) - the returned
``GradeSystem`` list
raise: NullArgument - ``grade_system_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('grading',
collection='GradeSystem',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(grade_system_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.GradeSystemList(result, runtime=self._runtime, proxy=self._proxy)
def list_logs(args, container_name=None):
'''list a specific log for a builder, or the latest log if none provided
Parameters
==========
args: the argparse object to look for a container name
container_name: a default container name set to be None (show latest log)
'''
from sregistry.main import Client as cli
if len(args.commands) > 0:
container_name = args.commands.pop(0)
cli.logs(container_name)
sys.exit(0)
def print_commands(self, out=sys.stdout):
'''
utility method to print commands
and descriptions for @BotFather
'''
cmds = self.list_commands()
for ck in cmds:
if ck.printable:
out.write('%s\n' % ck)
def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
"""
concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).
Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.
Args:
common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
concatenation operation
fields_to_remove: columns to be removed (if present) from the common_meta_dfs
remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
common_meta_dfs; overrides fields_to_remove if present
Returns:
tuple containing
all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
with duplicate rows removed
all_meta_df_with_dups: the same concatenation with duplicate rows retained
"""
if remove_all_metadata_fields:
trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs]
else:
shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
logger.debug("shared_column_headers: {}".format(shared_column_headers))
trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
# Remove any column headers that will prevent dfs from being identical
for df in trimmed_common_meta_dfs:
df.drop(fields_to_remove, axis=1, errors="ignore", inplace=True)
# Concatenate all dfs and then remove duplicate rows
all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df_with_dups.columns: {}".format(all_meta_df_with_dups.columns))
logger.debug("all_meta_df_with_dups.index: {}".format(all_meta_df_with_dups.index))
# If all metadata dfs were empty, df will be empty
if all_meta_df_with_dups.empty:
# Simply return unique ids
all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique())
else:
all_meta_df_with_dups["concat_column_for_index"] = all_meta_df_with_dups.index
all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
all_meta_df.drop("concat_column_for_index", axis=1, inplace=True)
all_meta_df_with_dups.drop("concat_column_for_index", axis=1, inplace=True)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df.shape: {}".format(all_meta_df.shape))
return (all_meta_df, all_meta_df_with_dups)
def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace)
"""
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed('summary')
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
print()
def collect(nested_nodes, transform=None):
'''
Return list containing the result of the `transform` function applied to
each item in the supplied list of nested nodes.
A custom transform function may be applied to each entry during the
flattening by specifying a function through the `transform` keyword
argument. The `transform` function will be passed the following arguments:
- `node`: The node/key of the entry.
- `parents`: The node/key of the parents as a `list`.
- `nodes`: The children of the entry.
By default, the `transform` function simply returns the node/key, resulting
in a flattened version of the original nested nodes structure.
'''
items = []
if transform is None:
transform = lambda node, parents, nodes, *args: node
def __collect__(node, parents, nodes, first, last, depth):
items.append(transform(node, parents, nodes, first, last, depth))
apply_depth_first(nested_nodes, __collect__)
return items
def task_done(self, **kw):
"""
Marks a pending task as done, optionally specifying a completion
date with the 'end' argument.
"""
def validate(task):
if not Status.is_pending(task['status']):
raise ValueError("Task is not pending.")
return self._task_change_status(Status.COMPLETED, validate, **kw)
def comment(self, text):
"""
Make a top-level comment to this.
:param text: The comment text.
"""
url = self._imgur._base_url + "/3/comment"
payload = {'image_id': self.id, 'comment': text}
resp = self._imgur._send_request(url, params=payload, needs_auth=True,
method='POST')
return Comment(resp, imgur=self._imgur, has_fetched=False)
def get_imports(self, option):
"""
See if we have been passed a set of currencies or a setting variable
or look for settings CURRENCIES or SHOP_CURRENCIES.
"""
if option:
if len(option) == 1 and option[0].isupper() and len(option[0]) > 3:
return getattr(settings, option[0])
else:
codes = [e for e in option if e.isupper() and len(e) == 3]
if len(codes) != len(option):
raise ImproperlyConfigured("Invalid currency codes found: %s" % codes)
return codes
for attr in ('CURRENCIES', 'SHOP_CURRENCIES'):
try:
return getattr(settings, attr)
except AttributeError:
continue
return option
def attach_volume(self, xml_bytes):
"""Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device
"""
root = XML(xml_bytes)
status = root.findtext("status")
attach_time = root.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
return {"status": status, "attach_time": attach_time} | Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device |
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
# Use alias
body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier != None and self.weight != None:
weight = self.WRRBody % {"identifier": self.identifier, "weight":
self.weight}
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
}
return self.XMLBody % params
def register_dimensions(self, dims):
"""
Register multiple dimensions on the cube.
.. code-block:: python
cube.register_dimensions([
{'name' : 'ntime', 'global_size' : 10,
'lower_extent' : 2, 'upper_extent' : 7 },
{'name' : 'na', 'global_size' : 3,
'lower_extent' : 2, 'upper_extent' : 7 },
])
Parameters
----------
dims : list or dict
A list or dictionary of dimensions
"""
if isinstance(dims, collections.Mapping):
dims = dims.itervalues()
for dim in dims:
self.register_dimension(dim.name, dim)
def from_unidiff(cls, diff: str) -> 'Patch':
"""
Constructs a Patch from a provided unified format diff.
"""
lines = diff.split('\n')
file_patches = []
while lines:
if lines[0] == '' or lines[0].isspace():
lines.pop(0)
continue
file_patches.append(FilePatch._read_next(lines))
return Patch(file_patches)
def restore_review_history_for_affected_objects(portal):
"""Applies the review history for objects that are bound to new senaite_*
workflows
"""
logger.info("Restoring review_history ...")
query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS)
brains = api.search(query, UID_CATALOG)
total = len(brains)
done = 0
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Restoring review_history: {}/{}"
.format(num, total))
review_history = api.get_review_history(brain, rev=False)
if review_history:
# Nothing to do. The object already has the review history set
continue
# Object without review history. Set the review_history manually
restore_review_history_for(brain)
done += 1
if done % 1000 == 0:
commit_transaction(portal)
logger.info("Restoring review history: {} processed [DONE]".format(done)) | Applies the review history for objects that are bound to new senaite_*
workflows |
def LOS_CrossProj(VType, Ds, us, kPIns, kPOuts, kRMins,
Lplot='In', proj='All', multi=False):
""" Compute the parameters to plot the poloidal projection of the LOS """
assert type(VType) is str and VType.lower() in ['tor','lin']
assert Lplot.lower() in ['tot','in']
assert type(proj) is str
proj = proj.lower()
assert proj in ['cross','hor','all','3d']
assert Ds.ndim==2 and Ds.shape==us.shape
nL = Ds.shape[1]
k0 = kPIns if Lplot.lower()=='in' else np.zeros((nL,))
if VType.lower()=='tor' and proj in ['cross','all']:
CrossProjAng = np.arccos(np.sqrt(us[0,:]**2+us[1,:]**2)
/np.sqrt(np.sum(us**2,axis=0)))
nkp = np.ceil(25.*(1 - (CrossProjAng/(np.pi/4)-1)**2) + 2)
ks = np.max([kRMins,kPIns],axis=0) if Lplot.lower()=='in' else kRMins
pts0 = []
if multi:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts0.append( np.array([[np.nan,np.nan],
[np.nan,np.nan]]) )
else:
k = np.linspace(k0[ii],kPOuts[ii],nkp[ii],endpoint=True)
k = np.unique(np.append(k,ks[ii]))
pp = Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1]
pts0.append( np.array([np.hypot(pp[0,:],pp[1,:]),pp[2,:]]) )
else:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts0.append(np.array([[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan]]))
else:
k = np.linspace(k0[ii],kPOuts[ii],nkp[ii],endpoint=True)
k = np.append(np.unique(np.append(k,ks[ii])),np.nan)
pts0.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
pts0 = np.concatenate(tuple(pts0),axis=1)
pts0 = np.array([np.hypot(pts0[0,:],pts0[1,:]),pts0[2,:]])
if not (VType.lower()=='tor' and proj=='cross'):
pts = []
if multi:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts.append( np.array([[np.nan,np.nan],
[np.nan,np.nan],
[np.nan,np.nan]]) )
else:
k = np.array([k0[ii],kPOuts[ii]])
pts.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
else:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts.append(np.array([[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan]]))
else:
k = np.array([k0[ii],kPOuts[ii],np.nan])
pts.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
pts = np.concatenate(tuple(pts),axis=1)
if proj=='hor':
pts = [pp[:2,:] for pp in pts] if multi else pts[:2,:]
elif proj=='cross':
if VType.lower()=='tor':
pts = pts0
else:
pts = [pp[1:,:] for pp in pts] if multi else pts[1:,:]
elif proj=='all':
if multi:
if VType.lower()=='tor':
pts = [(p0,pp[:2,:]) for (p0,pp) in zip(*[pts0,pts])]
else:
pts = (pts[1:,:],pts[:2,:])
else:
pts = (pts0,pts[:2,:]) if VType.lower()=='tor' else (pts[1:,:],pts[:2,:])
return pts
def get_cfgdict_list_subset(cfgdict_list, keys):
r"""
returns list of unique dictionaries only with keys specified in keys
Args:
cfgdict_list (list):
keys (list):
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --test-get_cfgdict_list_subset
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> cfgdict_list = [
... {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
>>> keys = ['K', 'dcvs_clip_max']
>>> # execute function
>>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
>>> # verify results
>>> result = ut.repr4(cfgdict_sublist)
>>> print(result)
[
{'K': 3, 'dcvs_clip_max': 0.1},
{'K': 5, 'dcvs_clip_max': 0.1},
{'K': 3, 'dcvs_clip_max': 0.2},
{'K': 5, 'dcvs_clip_max': 0.2},
]
"""
import utool as ut
cfgdict_sublist_ = [ut.dict_subset(cfgdict, keys) for cfgdict in cfgdict_list]
cfgtups_sublist_ = [tuple(ut.dict_to_keyvals(cfgdict)) for cfgdict in cfgdict_sublist_]
cfgtups_sublist = ut.unique_ordered(cfgtups_sublist_)
cfgdict_sublist = list(map(dict, cfgtups_sublist))
return cfgdict_sublist
def diropenbox(msg=None
, title=None
, default=None
):
"""
A dialog to get a directory name.
Note that the msg argument, if specified, is ignored.
Returns the name of a directory, or None if user chose to cancel.
If the "default" argument specifies a directory name, and that
directory exists, then the dialog box will start with that directory.
"""
if sys.platform == 'darwin':
_bring_to_front()
title=getFileDialogTitle(msg,title)
localRoot = Tk()
localRoot.withdraw()
if not default: default = None
f = tk_FileDialog.askdirectory(
parent=localRoot
, title=title
, initialdir=default
, initialfile=None
)
localRoot.destroy()
if not f: return None
return os.path.normpath(f)
def dumps(obj, big_endian=True):
"""
Dump a GeoJSON-like `dict` to a WKB string.
.. note::
The dimensions of the generated WKB will be inferred from the first
vertex in the GeoJSON `coordinates`. It will be assumed that all
vertices are uniform. There are 4 types:
- 2D (X, Y): 2-dimensional geometry
- Z (X, Y, Z): 3-dimensional geometry
- M (X, Y, M): 2-dimensional geometry with a "Measure"
- ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure"
If the first vertex contains 2 values, we assume a 2D geometry.
If the first vertex contains 3 values, this is slightly ambiguous and
so the most common case is chosen: Z.
If the first vertex contains 4 values, we assume a ZM geometry.
The WKT/WKB standards provide a way of differentiating normal (2D), Z,
M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text),
but the GeoJSON spec does not. Therefore, for the sake of interface
simplicity, we assume that geometry that looks 3D contains XYZ
components, instead of XYM.
If the coordinates list has no coordinate values (this includes nested
lists, for example, `[[[[],[]], []]]`), the geometry is considered to be
empty. Geometries, with the exception of points, have a reasonable
"empty" representation in WKB; however, without knowing the number of
coordinate values per vertex, the type is ambiguous, and thus we don't
know if the geometry type is 2D, Z, M, or ZM. Therefore in this case
we expect a `ValueError` to be raised.
:param dict obj:
GeoJson-like `dict` object.
:param bool big_endian:
Defaults to `True`. If `True`, data values in the generated WKB will
be represented using big endian byte order. Else, little endian.
TODO: remove this
:param str dims:
Indicates to WKB representation desired from converting the given
GeoJSON `dict` ``obj``. The accepted values are:
* '2D': 2-dimensional geometry (X, Y)
* 'Z': 3-dimensional geometry (X, Y, Z)
* 'M': 3-dimensional geometry (X, Y, M)
* 'ZM': 4-dimensional geometry (X, Y, Z, M)
:returns:
A WKB binary string representing of the ``obj``.
"""
geom_type = obj['type']
meta = obj.get('meta', {})
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
# Check for empty geometries. GeometryCollections have a slightly different
# JSON/dict structure, but that's handled.
coords_or_geoms = obj.get('coordinates', obj.get('geometries'))
if len(list(flatten_multi_dim(coords_or_geoms))) == 0:
raise ValueError(
'Empty geometries cannot be represented in WKB. Reason: The '
'dimensionality of the WKB would be ambiguous.'
)
return exporter(obj, big_endian, meta) | Dump a GeoJSON-like `dict` to a WKB string.
.. note::
The dimensions of the generated WKB will be inferred from the first
vertex in the GeoJSON `coordinates`. It will be assumed that all
vertices are uniform. There are 4 types:
- 2D (X, Y): 2-dimensional geometry
- Z (X, Y, Z): 3-dimensional geometry
- M (X, Y, M): 2-dimensional geometry with a "Measure"
- ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure"
If the first vertex contains 2 values, we assume a 2D geometry.
If the first vertex contains 3 values, this is slightly ambiguous and
so the most common case is chosen: Z.
If the first vertex contains 4 values, we assume a ZM geometry.
The WKT/WKB standards provide a way of differentiating normal (2D), Z,
M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text),
but the GeoJSON spec does not. Therefore, for the sake of interface
simplicity, we assume that geometry that looks 3D contains XYZ
components, instead of XYM.
If the coordinates list has no coordinate values (this includes nested
lists, for example, `[[[[],[]], []]]`, the geometry is considered to be
empty. Geometries, with the exception of points, have a reasonable
"empty" representation in WKB; however, without knowing the number of
coordinate values per vertex, the type is ambigious, and thus we don't
know if the geometry type is 2D, Z, M, or ZM. Therefore in this case
we expect a `ValueError` to be raised.
:param dict obj:
GeoJson-like `dict` object.
:param bool big_endian:
Defaults to `True`. If `True`, data values in the generated WKB will
be represented using big endian byte order. Else, little endian.
TODO: remove this
:param str dims:
Indicates to WKB representation desired from converting the given
GeoJSON `dict` ``obj``. The accepted values are:
* '2D': 2-dimensional geometry (X, Y)
* 'Z': 3-dimensional geometry (X, Y, Z)
* 'M': 3-dimensional geometry (X, Y, M)
* 'ZM': 4-dimensional geometry (X, Y, Z, M)
:returns:
A WKB binary string representing the ``obj``. |
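A minimal usage sketch for the dump routine above. It assumes the function is exposed as `geomet.wkb.dumps`; the module path and the sample coordinates are assumptions for illustration.
# Usage sketch (assumed entry point: geomet.wkb.dumps).
from geomet import wkb

point_2d = {'type': 'Point', 'coordinates': [1.0, 2.0]}      # 2 values -> 2D WKB
point_z = {'type': 'Point', 'coordinates': [1.0, 2.0, 3.0]}  # 3 values -> assumed XYZ

wkb_2d = wkb.dumps(point_2d, big_endian=True)   # big endian byte order
wkb_z = wkb.dumps(point_z, big_endian=False)    # little endian byte order

# An empty geometry raises ValueError: its dimensionality would be ambiguous.
try:
    wkb.dumps({'type': 'MultiPolygon', 'coordinates': [[[[], []], []]]})
except ValueError:
    pass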
def download(url, path, kind='file',
progressbar=True, replace=False, timeout=10., verbose=True):
"""Download a URL.
This will download a file and store it at the given ``path``,
creating directories if need be. It will also work for zip
files, in which case it will unzip all of the files to the
desired location.
Parameters
----------
url : string
The url of the file to download. This may be a dropbox
or google drive "share link", or a regular URL. If it
is a share link, then it should point to a single file and
not a folder. To download folders, zip them first.
path : string
The path where the downloaded file will be stored. If ``kind`` is not
'file', then this must be a folder into which the archive contents
will be unpacked.
kind : one of ['file', 'zip', 'tar', 'tar.gz']
The kind of file to be downloaded. If not 'file', then the downloaded
archive will be unpacked according to the kind specified, and its
contents will be placed under ``path``.
progressbar : bool
Whether to display a progress bar during file download.
replace : bool
If True and the URL points to a single file, overwrite the
old file if possible.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status to the screen.
Returns
-------
out_path : string
A path to the downloaded file (or folder, in the case of
a zip file).
"""
if kind not in ALLOWED_KINDS:
raise ValueError('`kind` must be one of {}, got {}'.format(
ALLOWED_KINDS, kind))
# Make sure we have directories to dump files
path = op.expanduser(path)
if len(path) == 0:
raise ValueError('You must specify a path. For current directory use .')
download_url = _convert_url_to_downloadable(url)
if replace is False and op.exists(path):
msg = ('Replace is False and data exists, so doing nothing. '
'Use replace==True to re-download the data.')
elif kind in ZIP_KINDS:
# Create new folder for data if we need it
if not op.isdir(path):
if verbose:
tqdm.write('Creating data folder...')
os.makedirs(path)
# Download the file to a temporary folder to unzip
path_temp = _TempDir()
path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
_fetch_file(download_url, path_temp_file, timeout=timeout,
verbose=verbose)
# Unzip the file to the out path
if verbose:
tqdm.write('Extracting {} file...'.format(kind))
if kind == 'zip':
zipper = ZipFile
elif kind == 'tar':
zipper = tarfile.open
elif kind == 'tar.gz':
zipper = partial(tarfile.open, mode='r:gz')
with zipper(path_temp_file) as myobj:
myobj.extractall(path)
msg = 'Successfully downloaded / unzipped to {}'.format(path)
else:
if not op.isdir(op.dirname(path)):
os.makedirs(op.dirname(path))
_fetch_file(download_url, path, timeout=timeout, verbose=verbose)
msg = 'Successfully downloaded file to {}'.format(path)
if verbose:
tqdm.write(msg)
return path | Download a URL.
This will download a file and store it at the given ``path``,
creating directories if need be. It will also work for zip
files, in which case it will unzip all of the files to the
desired location.
Parameters
----------
url : string
The url of the file to download. This may be a dropbox
or google drive "share link", or a regular URL. If it
is a share link, then it should point to a single file and
not a folder. To download folders, zip them first.
path : string
The path where the downloaded file will be stored. If ``kind`` is not
'file', then this must be a folder into which the archive contents
will be unpacked.
kind : one of ['file', 'zip', 'tar', 'tar.gz']
The kind of file to be downloaded. If not 'file', then the downloaded
archive will be unpacked according to the kind specified, and its
contents will be placed under ``path``.
progressbar : bool
Whether to display a progress bar during file download.
replace : bool
If True and the URL points to a single file, overwrite the
old file if possible.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status to the screen.
Returns
-------
out_path : string
A path to the downloaded file (or folder, in the case of
a zip file). |
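An illustrative call of the function above; the URL and local paths are placeholders, and the behaviour is the one documented in the docstring, not verified against a live server.
# Hypothetical usage of download(); the URL and paths are placeholders.
out = download(
    'https://example.com/datasets/archive.tar.gz',  # placeholder URL
    path='./data/archive',      # folder to unpack into, since kind != 'file'
    kind='tar.gz',
    replace=False,
    verbose=True,
)
print(out)  # the path that was passed in is returned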
def find_by_id(self, team, params={}, **options):
"""Returns the full record for a single team.
Parameters
----------
team : {Id} Globally unique identifier for the team.
[params] : {Object} Parameters for the request
"""
path = "/teams/%s" % (team)
return self.client.get(path, params, **options) | Returns the full record for a single team.
Parameters
----------
team : {Id} Globally unique identifier for the team.
[params] : {Object} Parameters for the request |
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self._query_compiler.index) * len(self._query_compiler.columns) | Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame. |
def __value_compare(self, target):
"""
Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean
"""
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False #pylint:disable=unidiomatic-typecheck
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False | Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean |
def generate(organization, package, destination):
"""Generates the Sphinx configuration and Makefile.
Args:
organization (str): the organization name.
package (str): the package to be documented.
destination (str): the destination directory.
"""
gen = ResourceGenerator(organization, package)
tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
try:
tmp.write(gen.conf())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))
tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
try:
tmp.write(gen.makefile())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'Makefile')) | Generates the Sphinx configuration and Makefile.
Args:
organization (str): the organization name.
package (str): the package to be documented.
destination (str): the destination directory. |
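A small usage sketch for the generator above; the organization and package names are placeholders, and the destination directory is assumed to exist (the function copies into it but does not create it).
# Hypothetical usage; 'acme' and 'acme_pkg' are placeholder names.
import os

os.makedirs('docs', exist_ok=True)    # generate() expects the directory to exist
generate('acme', 'acme_pkg', 'docs')  # writes docs/conf.py and docs/Makefile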
def get_random_service(
service_registry: ServiceRegistry,
block_identifier: BlockSpecification,
) -> Tuple[Optional[str], Optional[str]]:
"""Selects a random PFS from service_registry.
Returns a tuple of the chosen services url and eth address.
If there are no PFS in the given registry, it returns (None, None).
"""
count = service_registry.service_count(block_identifier=block_identifier)
if count == 0:
return None, None
index = random.SystemRandom().randint(0, count - 1)
address = service_registry.get_service_address(
block_identifier=block_identifier,
index=index,
)
# We are using the same blockhash for both blockchain queries so the address
# should exist for this query. Additionally at the moment there is no way for
# services to be removed from the registry.
assert address, 'address should exist for this index'
url = service_registry.get_service_url(
block_identifier=block_identifier,
service_hex_address=address,
)
return url, address | Selects a random PFS from service_registry.
Returns a tuple of the chosen services url and eth address.
If there are no PFS in the given registry, it returns (None, None). |
def sm_dict2lha(d):
"""Convert a a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file."""
blocks = OrderedDict([
('GAUGE', {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}),
('SCALAR', {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}),
('GU', {'values': matrix2lha(d['Gu'].real)}),
('IMGU', {'values': matrix2lha(d['Gu'].imag)}),
('GD', {'values': matrix2lha(d['Gd'].real)}),
('IMGD', {'values': matrix2lha(d['Gd'].imag)}),
('GE', {'values': matrix2lha(d['Ge'].real)}),
('IMGE', {'values': matrix2lha(d['Ge'].imag)}),
('THETA', {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}),
])
return {'BLOCK': blocks} | Convert a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file. |
def flush(self, timeout=60):
"""
Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout.
"""
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future = asyncio.Future(loop=self._loop)
try:
yield from self._send_ping(future)
yield from asyncio.wait_for(future, timeout, loop=self._loop)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout | Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout. |
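A hedged usage sketch for the coroutine above, written in the same old-style `yield from` form; `nc` stands for an already-connected client object exposing `flush()`, and its construction is not shown here.
# Sketch only: `nc` is assumed to be a connected client with the flush() above.
import asyncio

@asyncio.coroutine
def roundtrip(nc):
    # Raises ErrTimeout if the server does not answer the ping within 5 seconds.
    yield from nc.flush(timeout=5)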
def coerce(cls, key, value):
"""Convert plain dictionaries to MutationDict."""
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value | Convert plain dictionaries to MutationDict. |
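A direct illustration of the coercion rules above; it assumes `MutationDict` is in scope and that `coerce` is the classmethod shown, and it only exercises the branches that appear in the code.
# Plain dicts are wrapped; existing MutationDict instances pass through unchanged.
plain = {'a': 1}
wrapped = MutationDict.coerce('data', plain)
assert isinstance(wrapped, MutationDict)

already = MutationDict({'b': 2})
assert MutationDict.coerce('data', already) is already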
def open(self):
"""Open an existing database"""
if self._table_exists():
self.mode = "open"
# get table info
self._get_table_info()
return self
else:
# table not found
raise IOError("Table %s doesn't exist" % self.name) | Open an existing database
def check_all_permissions(sender, **kwargs):
"""
This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exist.
"""
if not is_permissions_app(sender):
return
config = getattr(settings, 'PERMISSIONS', dict())
# for each of our items
for natural_key, permissions in config.items():
# if the natural key '*' then that means add to all objects
if natural_key == '*':
# for each of our content types
for content_type in ContentType.objects.all():
for permission in permissions:
add_permission(content_type, permission)
# otherwise, this is on a specific content type, add for each of those
else:
app, model = natural_key.split('.')
try:
content_type = ContentType.objects.get_by_natural_key(app, model)
except ContentType.DoesNotExist:
continue
# add each permission
for permission in permissions:
add_permission(content_type, permission) | This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exist. |
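For reference, a hedged example of the `settings.PERMISSIONS` structure this handler consumes; the app label, model name, and permission codenames are illustrative only.
# settings.py (illustrative values)
PERMISSIONS = {
    '*': ('view',),                          # added to every content type
    'myapp.report': ('approve', 'publish'),  # only for the myapp.Report model
}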
def _run_operation_with_response(self, operation, unpack_res,
exhaust=False, address=None):
"""Run a _Query/_GetMore operation and return a Response.
:Parameters:
- `operation`: a _Query or _GetMore object.
- `unpack_res`: A callable that decodes the wire protocol response.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
- `address` (optional): Optional address when sending a message
to a specific server, used for getMore.
"""
if operation.exhaust_mgr:
server = self._select_server(
operation.read_preference, operation.session, address=address)
with self._reset_on_error(server.description.address,
operation.session):
return server.run_operation_with_response(
operation.exhaust_mgr.sock,
operation,
True,
self._event_listeners,
exhaust,
unpack_res)
def _cmd(session, server, sock_info, slave_ok):
return server.run_operation_with_response(
sock_info,
operation,
slave_ok,
self._event_listeners,
exhaust,
unpack_res)
return self._retryable_read(
_cmd, operation.read_preference, operation.session,
address=address,
retryable=isinstance(operation, message._Query),
exhaust=exhaust) | Run a _Query/_GetMore operation and return a Response.
:Parameters:
- `operation`: a _Query or _GetMore object.
- `unpack_res`: A callable that decodes the wire protocol response.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
- `address` (optional): Optional address when sending a message
to a specific server, used for getMore. |
def mach2cas(M, h):
""" Mach to CAS conversion """
tas = mach2tas(M, h)
cas = tas2cas(tas, h)
return cas | Mach to CAS conversion |
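A usage sketch under the assumption that the surrounding aero module works in SI units (altitude in metres, speeds in m/s); the numbers are illustrative.
# Mach 0.78 at 11 000 m; the result is calibrated airspeed in m/s under the
# assumed SI convention of the surrounding module.
cas = mach2cas(0.78, 11000.0)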
def reset_db():
""" drops the *scheduler* database, resets schema """
logger = get_logger(PROCESS_SCHEDULER)
logger.info('Starting *scheduler* DB reset')
ds = ds_manager.ds_factory(logger)
ds._db_client.drop_database(settings.settings['mongo_db_name'])
logger.info('*scheduler* db has been dropped')
connection = ds.connection(COLLECTION_MANAGED_PROCESS)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_FREERUN_PROCESS)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (ENTRY_NAME, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_UNIT_OF_WORK)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING),
(TIMEPERIOD, pymongo.ASCENDING),
(START_ID, pymongo.ASCENDING),
(END_ID, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_LOG_RECORDING)
connection.create_index([(PARENT_OBJECT_ID, pymongo.ASCENDING)], unique=True)
# expireAfterSeconds: <int> Used to create an expiring (TTL) collection.
# MongoDB will automatically delete documents from this collection after <int> seconds.
# The indexed field must be a UTC datetime or the data will not expire.
ttl_seconds = settings.settings['db_log_ttl_days'] * 86400 # number of seconds for TTL
connection.create_index(CREATED_AT, expireAfterSeconds=ttl_seconds)
for collection_name in [COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY,
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY]:
connection = ds.connection(collection_name)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (TIMEPERIOD, pymongo.ASCENDING)], unique=True)
# reset Synergy Flow tables
db_manager.reset_db()
logger.info('*scheduler* db has been recreated') | drops the *scheduler* database, resets schema |
def _set_exp_traffic_class(self, v, load=False):
"""
Setter method for exp_traffic_class, mapped from YANG variable /qos_mpls/map/exp_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_exp_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_exp_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("exp_traffic_class_map_name",exp_traffic_class.exp_traffic_class, yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='exp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure Exp traffic class', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCallpoint', u'cli-mode-name': u'exp-traffic-class-$(exp-traffic-class-map-name)'}}), is_container='list', yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Exp traffic class', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCallpoint', u'cli-mode-name': u'exp-traffic-class-$(exp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """exp_traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("exp_traffic_class_map_name",exp_traffic_class.exp_traffic_class, yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='exp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure Exp traffic class', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCallpoint', u'cli-mode-name': u'exp-traffic-class-$(exp-traffic-class-map-name)'}}), is_container='list', yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Exp traffic class', u'cli-sequence-commands': None, u'callpoint': u'QosMplsCallpoint', u'cli-mode-name': u'exp-traffic-class-$(exp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mpls', defining_module='brocade-qos-mpls', yang_type='list', is_config=True)""",
})
self.__exp_traffic_class = t
if hasattr(self, '_set'):
self._set() | Setter method for exp_traffic_class, mapped from YANG variable /qos_mpls/map/exp_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_exp_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_exp_traffic_class() directly. |
def resource_type_type(loader):
""" Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. """
def impl(string):
t_resources = loader.get_models('common.models#ResourceTypes')
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl | Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. |
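A hedged sketch of how the returned validator behaves; `loader` stands for an Azure CLI command loader instance, which is not constructed here.
# `loader` is assumed to be a command loader exposing get_models().
validator = resource_type_type(loader)
resource_types = validator('sco')   # service + container + object
# validator('x') raises ValueError: only 's', 'c' and 'o' are accepted.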
def textContent(self, text: str) -> None: # type: ignore
"""Set text content to inner node."""
if self._inner_element:
self._inner_element.textContent = text
else:
# Need a trick to call property of super-class
super().textContent = text | Set text content to inner node. |
def select_window(pymux, variables):
"""
Select a window. E.g: select-window -t :3
"""
window_id = variables['<target-window>']
def invalid_window():
raise CommandException('Invalid window: %s' % window_id)
if window_id.startswith(':'):
try:
number = int(window_id[1:])
except ValueError:
invalid_window()
else:
w = pymux.arrangement.get_window_by_index(number)
if w:
pymux.arrangement.set_active_window(w)
else:
invalid_window()
else:
invalid_window() | Select a window. E.g: select-window -t :3 |
def save(self, filename, clobber=True, **kwargs):
"""
Save the `Spectrum1D` object to the specified filename.
:param filename:
The filename to save the Spectrum1D object to.
:type filename:
str
:param clobber: [optional]
Whether to overwrite the `filename` if it already exists.
:type clobber:
bool
:raises IOError:
If the filename exists and we were not asked to clobber it.
"""
if os.path.exists(filename) and not clobber:
raise IOError("filename '{0}' exists and we have been asked not to"\
" clobber it".format(filename))
if not filename.endswith('fits'):
# ASCII
data = np.hstack([
self.disp.reshape(-1, 1),
self.flux.reshape(-1, 1),
self.variance.reshape(-1, 1)
])
return np.savetxt(filename, data, **kwargs)
else:
# Create a tabular FITS format
disp = fits.Column(name='disp', format='1D', array=self.disp)
flux = fits.Column(name='flux', format='1D', array=self.flux)
var = fits.Column(name='variance', format='1D', array=self.variance)
table_hdu = fits.new_table([disp, flux, var])
# Create Primary HDU
hdu = fits.PrimaryHDU()
# Update primary HDU with headers
for key, value in self.headers.iteritems():
if len(key) > 8: # To deal with ESO compatibility
hdu.header.update('HIERARCH {}'.format(key), value)
try:
hdu.header.update(key, value)
except ValueError:
logger.warn("Could not save header key/value combination: "\
"{0} = {1}".format(key, value))
# Create HDU list with our tables
hdulist = fits.HDUList([hdu, table_hdu])
return hdulist.writeto(filename, clobber=clobber, **kwargs) | Save the `Spectrum1D` object to the specified filename.
:param filename:
The filename to save the Spectrum1D object to.
:type filename:
str
:param clobber: [optional]
Whether to overwrite the `filename` if it already exists.
:type clobber:
bool
:raises IOError:
If the filename exists and we were not asked to clobber it. |
def SETB(cpu, dest):
"""
Sets byte if below.
:param cpu: current CPU.
:param dest: destination operand.
"""
dest.write(Operators.ITEBV(dest.size, cpu.CF, 1, 0)) | Sets byte if below.
:param cpu: current CPU.
:param dest: destination operand. |
def clear_threads(self):
"""
Clears the threads snapshot.
"""
for aThread in compat.itervalues(self.__threadDict):
aThread.clear()
self.__threadDict = dict() | Clears the threads snapshot. |
def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws):
"""Print list of namedtuples into a table using prtfmt."""
lines = get_lines(data_nts, prtfmt, nt_fields, **kws)
if lines:
for line in lines:
prt.write(line)
else:
sys.stdout.write(" 0 items. NOT WRITING\n") | Print list of namedtuples into a table using prtfmt. |
def cmServiceRequest(PriorityLevel_presence=0):
"""CM SERVICE REQUEST Section 9.2.9"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x24) # 00100100
c = CmServiceTypeAndCiphKeySeqNr()
e = MobileStationClassmark2()
f = MobileId()
packet = a / b / c / e / f
if PriorityLevel_presence == 1:
g = PriorityLevelHdr(ieiPL=0x8, eightBitPL=0x0)
packet = packet / g
return packet | CM SERVICE REQUEST Section 9.2.9 |
def validNormalizeAttributeValue(self, doc, name, value):
"""Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidNormalizeAttributeValue(doc__o, self._o, name, value)
return ret | Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. |
def has_equal_value(state, ordered=False, ndigits=None, incorrect_msg=None):
"""Verify if a student and solution query result match up.
This function must always be used after 'zooming' in on certain columns or records (check_column, check_row or check_result).
``has_equal_value`` then goes over all columns that are still left in the solution query result, and compares each column with the
corresponding column in the student query result.
Args:
ordered: if set to False, the default, all rows are sorted (according
to the first column and the following columns as tie breakers).
If set to True, the order of rows in the student and solution query results has to match.
ndigits: if specified, number of decimals to use when comparing column values.
incorrect_msg: if specified, this overrides the automatically generated feedback
message in case a column in the student query result does not match
a column in the solution query result.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists ORDER BY name``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# passes, as order is not important by default
Ex().check_column('name').has_equal_value()
# fails, as order is deemed important
Ex().check_column('name').has_equal_value(ordered=True)
# check_column fails, as id is not in the student query result
Ex().check_column('id').has_equal_value()
# check_all_columns fails, as id not in the student query result
Ex().check_all_columns().has_equal_value()
"""
if not hasattr(state, "parent"):
raise ValueError(
"You can only use has_equal_value() on the state resulting from check_column, check_row or check_result."
)
if incorrect_msg is None:
incorrect_msg = "Column `{{col}}` seems to be incorrect.{{' Make sure you arranged the rows correctly.' if ordered else ''}}"
# First of all, check if number of rows correspond
has_nrows(state)
if not ordered:
stu_res, sol_res = sort_rows(state)
else:
stu_res = state.student_result
sol_res = state.solution_result
for sol_col_name, sol_col_vals in sol_res.items():
stu_col_vals = stu_res[sol_col_name]
if ndigits is not None:
try:
sol_col_vals = round_seq(sol_col_vals, ndigits)
stu_col_vals = round_seq(stu_col_vals, ndigits)
except:
pass
if sol_col_vals != stu_col_vals:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"col": sol_col_name, "ordered": ordered}
)
state.do_test(_msg)
return state | Verify if a student and solution query result match up.
This function must always be used after 'zooming' in on certain columns or records (check_column, check_row or check_result).
``has_equal_value`` then goes over all columns that are still left in the solution query result, and compares each column with the
corresponding column in the student query result.
Args:
ordered: if set to False, the default, all rows are sorted (according
to the first column and the following columns as tie breakers).
If set to True, the order of rows in the student and solution query results has to match.
ndigits: if specified, number of decimals to use when comparing column values.
incorrect_msg: if specified, this overrides the automatically generated feedback
message in case a column in the student query result does not match
a column in the solution query result.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists ORDER BY name``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# passes, as order is not important by default
Ex().check_column('name').has_equal_value()
# fails, as order is deemed important
Ex().check_column('name').has_equal_value(ordered=True)
# check_column fails, as id is not in the student query result
Ex().check_column('id').has_equal_value()
# check_all_columns fails, as id not in the student query result
Ex().check_all_columns().has_equal_value() |
def string_to_tokentype(s):
"""
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node | Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String |
def get_nested_val(key_tuple, dict_obj):
"""Return a value from nested dicts by the order of the given keys tuple.
Parameters
----------
key_tuple : tuple
The keys to use for extraction, in order.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example
-------
>>> dict_obj = {'a': {'b': 7}}
>>> get_nested_val(('a', 'b'), dict_obj)
7
"""
if len(key_tuple) == 1:
return dict_obj[key_tuple[0]]
return get_nested_val(key_tuple[1:], dict_obj[key_tuple[0]]) | Return a value from nested dicts by the order of the given keys tuple.
Parameters
----------
key_tuple : tuple
The keys to use for extraction, in order.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example
-------
>>> dict_obj = {'a': {'b': 7}}
>>> get_nested_val(('a', 'b'), dict_obj)
7 |
def validate(self, instance, value):
"""Checks that value is a float and in min/max bounds
Non-float numbers are coerced to floats
"""
try:
floatval = float(value)
if not self.cast and abs(value - floatval) > TOL:
self.error(
instance=instance,
value=value,
extra='Not within tolerance range of {}.'.format(TOL),
)
except (TypeError, ValueError):
self.error(instance, value, extra='Cannot cast to float.')
_in_bounds(self, instance, floatval)
return floatval | Checks that value is a float and in min/max bounds
Non-float numbers are coerced to floats |
def list_controller_revision_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_controller_revision_for_all_namespaces # noqa: E501
list or watch objects of kind ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
return data | list_controller_revision_for_all_namespaces # noqa: E501
list or watch objects of kind ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread. |
def hog(image, orientations=8, ksize=(5, 5)):
'''
returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options
'''
s0, s1 = image.shape[:2]
# speed up the process through saving generated kernels:
try:
k = hog.kernels[str(ksize) + str(orientations)]
except KeyError:
k = _mkConvKernel(ksize, orientations)
hog.kernels[str(ksize) + str(orientations)] = k
out = np.empty(shape=(s0, s1, orientations))
image[np.isnan(image)] = 0
for i in range(orientations):
out[:, :, i] = convolve(image, k[i])
return out | returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options |
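A usage sketch for the function above; it assumes the module initialises the `hog.kernels` cache (set explicitly here for safety) and that `convolve` and `_mkConvKernel` are available in the same module.
# Sketch only; hog.kernels is the per-(ksize, orientations) kernel cache.
import numpy as np

if not hasattr(hog, 'kernels'):
    hog.kernels = {}

image = np.random.rand(64, 64)
h = hog(image, orientations=8, ksize=(5, 5))
print(h.shape)   # (64, 64, 8): one response map per orientation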
def subprocess_check_output(*args, cwd=None, env=None, stderr=False):
"""
Run a command and capture output
:param *args: List of command arguments
:param cwd: Current working directory
:param env: Command environment
:param stderr: Read on stderr
:returns: Command output
"""
if stderr:
proc = yield from asyncio.create_subprocess_exec(*args, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stderr.read()
else:
proc = yield from asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stdout.read()
if output is None:
return ""
# If we received garbage we ignore invalid characters;
# this should happen only when the user tries to use another binary,
# and the VPCS, dynamips... code will detect that it's not the correct binary
return output.decode("utf-8", errors="ignore") | Run a command and capture output
:param *args: List of command arguments
:param cwd: Current working directory
:param env: Command environment
:param stderr: Read on stderr
:returns: Command output |
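A usage sketch in the same old-style coroutine form as the source; the command is a placeholder and error handling is omitted.
# Sketch: run a command and read its stdout through the coroutine above.
import asyncio

@asyncio.coroutine
def show_uname():
    output = yield from subprocess_check_output('uname', '-a')
    print(output.strip())

asyncio.get_event_loop().run_until_complete(show_uname())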
def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service | Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service |
def tocimxml(value):
# pylint: disable=line-too-long
"""
Return the CIM-XML representation of the input object,
as an object of an appropriate subclass of :term:`Element`.
The returned CIM-XML representation is consistent with :term:`DSP0201`.
Parameters:
value (:term:`CIM object`, :term:`CIM data type`, :term:`number`, :class:`py:datetime.datetime`, or tuple/list thereof):
The input object.
Specifying `None` has been deprecated in pywbem 0.12.
Returns:
The CIM-XML representation, as an object of an appropriate subclass of
:term:`Element`.
""" # noqa: E501
if isinstance(value, (tuple, list)):
array_xml = []
for v in value:
if v is None:
if SEND_VALUE_NULL:
array_xml.append(cim_xml.VALUE_NULL())
else:
array_xml.append(cim_xml.VALUE(None))
else:
array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v)))
value_xml = cim_xml.VALUE_ARRAY(array_xml)
return value_xml
if hasattr(value, 'tocimxml'):
return value.tocimxml()
if value is None:
warnings.warn("A value of None for pywbem.tocimxml() has been "
"deprecated.",
DeprecationWarning, stacklevel=2)
return cim_xml.VALUE(atomic_to_cim_xml(value)) | Return the CIM-XML representation of the input object,
as an object of an appropriate subclass of :term:`Element`.
The returned CIM-XML representation is consistent with :term:`DSP0201`.
Parameters:
value (:term:`CIM object`, :term:`CIM data type`, :term:`number`, :class:`py:datetime.datetime`, or tuple/list thereof):
The input object.
Specifying `None` has been deprecated in pywbem 0.12.
Returns:
The CIM-XML representation, as an object of an appropriate subclass of
:term:`Element`. |
def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
'''
If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0}
'''
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
# Build fault surface
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
mesh_spacing) | If geometry is defined as a numpy array then create instance of
nhlib.geo.line.Line class, otherwise if already instance of class
accept class
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0} |
def create_widget(self):
""" Create the underlying widget.
"""
context = self.get_context()
d = self.declaration
style = d.style or '@attr/autoCompleteTextViewStyle'
self.widget = AutoCompleteTextView(context, None, style)
self.adapter = ArrayAdapter(context, '@layout/simple_list_item_1') | Create the underlying widget. |
def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
"""Renames a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel'
"""
self._validate_xoxp_token()
kwargs.update({"channel": channel, "name": name})
return self.api_call("channels.rename", json=kwargs) | Renames a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel' |
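A hedged usage sketch; `client` stands for a Slack WebClient constructed with a user (xoxp) token, which the validation above requires, and the channel id and name are placeholders.
# `client` is assumed to be a WebClient built with a user token.
response = client.channels_rename(channel='C1234567890', name='newchannel')
assert response['ok']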
def hierarchyLookup(self, record):
"""
Looks up additional hierarchy information for the input record.
:param record | <orb.Table>
:return (<subclass of orb.Table> || None, <str> column)
"""
def _get_lookup(cls):
if cls in self._hierarchyLookup:
return self._hierarchyLookup[cls]
for base in cls.__bases__:
results = _get_lookup(base)
if results:
return results
return (None, None)
tableType, column = _get_lookup(type(record))
if tableType and column:
return (tableType, column)
default = self._hierarchyLookup.get(None)
if default:
return default
return (None, None) | Looks up additional hierarchy information for the input record.
:param record | <orb.Table>
:return (<subclass of orb.Table> || None, <str> column) |
def _certificate_required(cls, hostname, port=XCLI_DEFAULT_PORT,
ca_certs=None, validate=None):
'''
returns true if connection should verify certificate
'''
if not ca_certs:
return False
xlog.debug("CONNECT SSL %s:%s, cert_file=%s",
hostname, port, ca_certs)
certificate = ssl.get_server_certificate((hostname, port),
ca_certs=None)
# handle XIV pre-defined certifications
# if a validation function was given - we let the user check
# the certificate himself, with the user's own validate function.
# if the validate returned True - the user checked the cert
# and we don't need check it, so we return false.
if validate:
return not validate(certificate)
return True | returns true if connection should verify certificate |
def enqueue_conversion_path(url_string, to_type, enqueue_convert):
'''
Given a URL string that has already been downloaded, enqueue
necessary conversion to get to target type
'''
target_ts = TypeString(to_type)
foreign_res = ForeignResource(url_string)
# Determine the file type of the foreign resource
typed_foreign_res = foreign_res.guess_typed()
if not typed_foreign_res.cache_exists():
# Symlink to new location that includes typed extension
typed_foreign_res.symlink_from(foreign_res)
# Now find path between types
original_ts = typed_foreign_res.typestring
path = singletons.converter_graph.find_path(original_ts, target_ts)
# Loop through each step in graph path and convert
is_first = True
for converter_class, from_ts, to_ts in path:
converter = converter_class()
in_resource = TypedResource(url_string, from_ts)
if is_first: # Ensure first resource is just the source one
in_resource = TypedForeignResource(url_string, from_ts)
out_resource = TypedResource(url_string, to_ts)
enqueue_convert(converter, in_resource, out_resource)
is_first = False | Given a URL string that has already been downloaded, enqueue
necessary conversion to get to target type |
def _update_key(self, mask, key):
"""
Mask the value contained in the DataStore at a specified key.
Parameters
-----------
mask: (n,) int
(n,) bool
key: hashable object, in self._data
"""
mask = np.asanyarray(mask)
if key in self._data:
self._data[key] = self._data[key][mask] | Mask the value contained in the DataStore at a specified key.
Parameters
-----------
mask: (n,) int
(n,) bool
key: hashable object, in self._data |
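A standalone illustration of the masking semantics applied above, independent of the DataStore class; the key name is arbitrary.
# The stored array is simply re-indexed by the boolean (or integer) mask.
import numpy as np

data = {'face_colors': np.arange(5)}
mask = np.array([True, False, True, False, True])
data['face_colors'] = data['face_colors'][mask]
# data['face_colors'] is now array([0, 2, 4])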
def rows2skip(self, decdel):
"""
Return the number of rows to skip based on the decimal delimiter
decdel.
When each record start to have the same number of matches, this
is where the data starts. This is the idea. And the number of
consecutive records to have the same number of matches is to be
EQUAL_CNT_REQ.
"""
if decdel == '.':
ms = self.matches_p
elif decdel == ',':
ms = self.matches_c
# else make error...
cnt = row = 0
for val1, val2 in zip(ms, ms[1:]):
# val2 is one element ahead.
row += 1
if val2 == val1 != 0: # 0 is no matches, so it doesn't count.
cnt += 1
else:
cnt = 0
if cnt == EQUAL_CNT_REQ:
break
else:
# print 'No break-out for', decdel, 'cnt:', cnt
pass
self.cnt = cnt
return row - EQUAL_CNT_REQ | Return the number of rows to skip based on the decimal delimiter
decdel.
When consecutive records start to have the same number of matches, that
is taken to be where the data starts. The number of consecutive records
required to have the same match count is EQUAL_CNT_REQ. |
def json_item(model, target=None, theme=FromCurdoc):
''' Return a JSON block that can be used to embed standalone Bokeh content.
Args:
model (Model) :
The Bokeh object to embed
target (string, optional)
A div id to embed the model into. If None, the target id must
be supplied in the JavaScript call.
theme (Theme, optional) :
Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
JSON-like
This function returns a JSON block that can be consumed by the BokehJS
function ``Bokeh.embed.embed_item``. As an example, a Flask endpoint for
``/plot`` might return the following content to embed a Bokeh plot into
a div with id *"myplot"*:
.. code-block:: python
@app.route('/plot')
def plot():
p = make_plot('petal_width', 'petal_length')
return json.dumps(json_item(p, "myplot"))
Then a web page can retrieve this JSON and embed the plot by calling
``Bokeh.embed.embed_item``:
.. code-block:: html
<script>
fetch('/plot')
.then(function(response) { return response.json(); })
.then(function(item) { Bokeh.embed.embed_item(item); })
</script>
Alternatively, if is more convenient to supply the target div id directly
in the page source, that is also possible. If `target_id` is omitted in the
call to this function:
.. code-block:: python
return json.dumps(json_item(p))
Then the value passed to ``embed_item`` is used:
.. code-block:: javascript
Bokeh.embed.embed_item(item, "myplot");
'''
with OutputDocumentFor([model], apply_theme=theme) as doc:
doc.title = ""
docs_json = standalone_docs_json([model])
doc = list(docs_json.values())[0]
root_id = doc['roots']['root_ids'][0]
return {
'target_id' : target,
'root_id' : root_id,
'doc' : doc,
} | Return a JSON block that can be used to embed standalone Bokeh content.
Args:
model (Model) :
The Bokeh object to embed
target (string, optional)
A div id to embed the model into. If None, the target id must
be supplied in the JavaScript call.
theme (Theme, optional) :
Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
JSON-like
This function returns a JSON block that can be consumed by the BokehJS
function ``Bokeh.embed.embed_item``. As an example, a Flask endpoint for
``/plot`` might return the following content to embed a Bokeh plot into
a div with id *"myplot"*:
.. code-block:: python
@app.route('/plot')
def plot():
p = make_plot('petal_width', 'petal_length')
return json.dumps(json_item(p, "myplot"))
Then a web page can retrieve this JSON and embed the plot by calling
``Bokeh.embed.embed_item``:
.. code-block:: html
<script>
fetch('/plot')
.then(function(response) { return response.json(); })
.then(function(item) { Bokeh.embed.embed_item(item); })
</script>
Alternatively, if is more convenient to supply the target div id directly
in the page source, that is also possible. If `target_id` is omitted in the
call to this function:
.. code-block:: python
return json.dumps(json_item(p))
Then the value passed to ``embed_item`` is used:
.. code-block:: javascript
Bokeh.embed.embed_item(item, "myplot"); |
def normalize(self, decl_string, arg_separator=None):
"""implementation details"""
if not self.has_pattern(decl_string):
return decl_string
name, args = self.split(decl_string)
for i, arg in enumerate(args):
args[i] = self.normalize(arg)
return self.join(name, args, arg_separator) | implementation details |
def execute(self):
"""
Executes the command.
"""
from vsgen.util.logger import VSGLogger
VSGLogger.info(self._logname, self._message)
start = time.clock()
VSGWriter.write(self._writables, self._parallel)
end = time.clock()
VSGLogger.info(self._logname, "Wrote %s files in %s seconds:", len(self._writables), end - start) | Executes the command. |
def hacking_docstring_start_space(physical_line, previous_logical, tokens):
r"""Check for docstring not starting with space.
OpenStack HACKING guide recommendation for docstring:
Docstring should not start with space
Okay: def foo():\n '''This is good.'''
Okay: def foo():\n r'''This is good.'''
Okay: def foo():\n a = ''' This is not a docstring.'''
Okay: def foo():\n pass\n ''' This is not.'''
H401: def foo():\n ''' This is not.'''
H401: def foo():\n r''' This is not.'''
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
if docstring[len(start_triple)] == ' ':
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H401: docstring should not start with"
" a space") | r"""Check for docstring not starting with space.
OpenStack HACKING guide recommendation for docstring:
Docstring should not start with space
Okay: def foo():\n '''This is good.'''
Okay: def foo():\n r'''This is good.'''
Okay: def foo():\n a = ''' This is not a docstring.'''
Okay: def foo():\n pass\n ''' This is not.'''
H401: def foo():\n ''' This is not.'''
H401: def foo():\n r''' This is not.''' |
def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False):
"""Perform a dot product attention on a single sequence on a single head.
This function dispatch the q, k, v and loop over the buckets to compute the
attention dot product on each subsequences.
Args:
q (tf.Tensor): [batch*heads, length_q, depth_q]
k (tf.Tensor): [batch*heads, length_k, depth_q]
v (tf.Tensor): [batch*heads, length_k, depth_v]
gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets]
gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets]
mask_right (bool): Add a bias to prevent attention to the future
Returns:
tf.Tensor: [length_q, depth_v]
"""
nb_buckets = common_layers.shape_list(gates_q)[-1]
@expert_utils.add_name_scope()
def get_dispatcher(gates):
"""Construct dispatcher for gates."""
length = common_layers.shape_list(gates)[1]
# Count the number of ones per batch (and keep the max value)
nb_elems_to_dispatch = tf.reduce_sum(gates, axis=[1, 2])
nb_elems_to_dispatch = tf.reduce_max(nb_elems_to_dispatch)
nb_elems_to_dispatch = tf.to_int32(nb_elems_to_dispatch)
capacity = nb_elems_to_dispatch // nb_buckets * 2 # Capacity is hardcoded
capacity = tf.minimum(length, capacity)
tf.summary.scalar("dispatch_capacity", capacity, family="lsh")
return expert_utils.TruncatingDispatcher(gates, capacity)
def add_summary_capacity(x, prefix):
# Monitor if capacity overflow
x = x[0, ...] # Take first batch/head
x = tf.reduce_sum(x, axis=0)
tf.summary.scalar(prefix + "_min", tf.reduce_min(x), family="lsh")
tf.summary.scalar(prefix + "_max", tf.reduce_max(x), family="lsh")
tf.summary.histogram(prefix + "capacity_distribution", x, family="lsh")
for i in range(3): # Show the first 3 buckets
tf.summary.scalar("{}_{}".format(prefix, i), x[i], family="lsh")
add_summary_capacity(gates_q, "q")
add_summary_capacity(gates_k, "k")
q_dispatcher = get_dispatcher(gates_q)
k_dispatcher = get_dispatcher(gates_k)
q = q_dispatcher.dispatch(q)
k = k_dispatcher.dispatch(k)
v = k_dispatcher.dispatch(v)
# Bias of shape [batch*heads, nb_buckets, 1, capacity] broadcasted to every
# queries
bias = tf.expand_dims((k_dispatcher.nonpadding() - 1.0) * 1e9, 2)
if mask_right:
q_coordinate = tf.to_float(
tf.expand_dims(q_dispatcher.length_coordinate(), 3))
k_coordinate = tf.to_float(
tf.expand_dims(k_dispatcher.length_coordinate(), 2))
bias += tf.to_float(tf.greater(k_coordinate, q_coordinate)) * -1e9
# The sequence padding is not masked but is ignored on the next layers
# q, k, v now have shape [batch*heads, nb_bucket, capacity, depth]
# The buckets can be seen as different heads
v_out = dot_product_attention(q, k, v, bias=bias)
# Combine all buckets together to restore the original length
return q_dispatcher.combine(v_out) | Perform a dot product attention on a single sequence on a single head.
This function dispatch the q, k, v and loop over the buckets to compute the
attention dot product on each subsequences.
Args:
q (tf.Tensor): [batch*heads, length_q, depth_q]
k (tf.Tensor): [batch*heads, length_k, depth_q]
v (tf.Tensor): [batch*heads, length_k, depth_v]
gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets]
gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets]
mask_right (bool): Add a bias to prevent attention to the future
Returns:
tf.Tensor: [length_q, depth_v] |
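A small NumPy sketch (not the tensor2tensor dispatcher itself) of how the bucket capacity above is derived from the one-hot gates: count the assignments per batch, keep the maximum, and give each bucket twice the average share, capped at the sequence length. The gates array is a made-up example.

import numpy as np

gates = np.array([[[1, 0], [0, 1], [0, 1], [1, 0]]], dtype=np.float32)
# shape [batch*heads=1, length=4, nb_buckets=2]
length, nb_buckets = gates.shape[1], gates.shape[2]
nb_elems_to_dispatch = int(gates.sum(axis=(1, 2)).max())  # 4 assignments in total
capacity = nb_elems_to_dispatch // nb_buckets * 2         # 4 // 2 * 2 = 4
capacity = min(length, capacity)
print(capacity)  # 4: each bucket may hold up to 4 of the 4 positions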
def visit_FormattedValue(self, node: AST,
dfltChaining: bool = True) -> str:
"""Return `node`'s value formatted according to its format spec."""
format_spec = node.format_spec
return f"{{{self.visit(node.value)}" \
f"{self.CONV_MAP.get(node.conversion, '')}" \
f"{':'+self._nested_str(format_spec) if format_spec else ''}}}" | Return `node`'s value formatted according to its format spec. |
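A short stdlib illustration of the node fields this visitor reads; the f-string literal is a made-up example and the exact ast.dump output varies across Python versions.

import ast

tree = ast.parse("f'{value!r:>10}'", mode="eval")
fv = tree.body.values[0]          # the ast.FormattedValue node
print(type(fv).__name__)          # FormattedValue
print(ast.dump(fv.value))         # Name(id='value', ...)
print(fv.conversion)              # 114 == ord('r'), i.e. the !r conversion
print(ast.dump(fv.format_spec))   # JoinedStr wrapping the '>10' format spec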
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
"""
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
"""
# check to see if we've found all the solutions that we need
if self._finished:
return
# if we're done with the current manipulation, pop it off.
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
# if there are no more manipulations left to do, check the value
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list)
return
# if we won't have enough indices left, return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
# Make the matrix and new m_list where we do the manipulation to the
# index that we just got
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
# recurse through both the modified and unmodified matrices
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list) | This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them. |
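A generic, self-contained branch-and-bound sketch of the same include/exclude recursion (not pymatgen's actual class); each call either applies the row/column manipulation to the next index or skips it, while the real method additionally prunes branches via best_case() against the current minimum. All names below are illustrative.

import numpy as np

def min_sum_after_scaling(matrix, factor, n_picks, indices):
    # Minimal matrix sum after scaling the rows/columns of exactly
    # n_picks of the given indices by `factor`.
    if n_picks == 0:
        return matrix.sum()
    if len(indices) < n_picks:
        return np.inf
    index, rest = indices[0], indices[1:]
    scaled = matrix.copy()
    scaled[index, :] *= factor
    scaled[:, index] *= factor
    return min(min_sum_after_scaling(scaled, factor, n_picks - 1, rest),
               min_sum_after_scaling(matrix, factor, n_picks, rest))

m = np.array([[1.0, 2.0], [2.0, 4.0]])
print(min_sum_after_scaling(m, 0.5, 1, [0, 1]))  # 4.0 (scaling index 1 wins)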
def setImagePlotAutoRangeOn(self, axisNumber):
""" Sets the image plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
"""
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.yAxisRangeCti, axisNumber) | Sets the image plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes). |
def isPositiveStrand(self):
"""
Check if this genomic region is on the positive strand.
:return: True if this element is on the positive strand
"""
if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND:
return True
return self.strand == self.POSITIVE_STRAND | Check if this genomic region is on the positive strand.
:return: True if this element is on the positive strand |
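A tiny illustration of the default-strand fallback described above, using a hypothetical stand-in class (GenomicRegionStub is not the real class):

class GenomicRegionStub:
    POSITIVE_STRAND = "+"
    NEGATIVE_STRAND = "-"
    DEFAULT_STRAND = "+"

    def __init__(self, strand=None):
        self.strand = strand

    def isPositiveStrand(self):
        if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND:
            return True
        return self.strand == self.POSITIVE_STRAND

print(GenomicRegionStub().isPositiveStrand())     # True: falls back to the default strand
print(GenomicRegionStub("-").isPositiveStrand())  # False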
def to_text_diagram_drawer(
self,
*,
use_unicode_characters: bool = True,
qubit_namer: Optional[Callable[[ops.Qid], str]] = None,
transpose: bool = False,
precision: Optional[int] = 3,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
get_circuit_diagram_info:
Optional[Callable[[ops.Operation,
protocols.CircuitDiagramInfoArgs],
protocols.CircuitDiagramInfo]]=None
) -> TextDiagramDrawer:
"""Returns a TextDiagramDrawer with the circuit drawn into it.
Args:
use_unicode_characters: Determines if unicode characters are
allowed (as opposed to ascii-only diagrams).
qubit_namer: Names qubits in diagram. Defaults to str.
transpose: Arranges qubit wires vertically instead of horizontally.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the diagram.
get_circuit_diagram_info: Gets circuit diagram info. Defaults to
protocol with fallback.
Returns:
The TextDiagramDrawer instance.
"""
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
self.all_qubits())
qubit_map = {qubits[i]: i for i in range(len(qubits))}
if qubit_namer is None:
qubit_namer = lambda q: str(q) + ('' if transpose else ': ')
diagram = TextDiagramDrawer()
for q, i in qubit_map.items():
diagram.write(0, i, qubit_namer(q))
moment_groups = [] # type: List[Tuple[int, int]]
for moment in self._moments:
_draw_moment_in_diagram(moment,
use_unicode_characters,
qubit_map,
diagram,
precision,
moment_groups,
get_circuit_diagram_info)
w = diagram.width()
for i in qubit_map.values():
diagram.horizontal_line(i, 0, w)
if moment_groups:
_draw_moment_groups_in_diagram(moment_groups,
use_unicode_characters,
diagram)
if transpose:
diagram = diagram.transpose()
return diagram | Returns a TextDiagramDrawer with the circuit drawn into it.
Args:
use_unicode_characters: Determines if unicode characters are
allowed (as opposed to ascii-only diagrams).
qubit_namer: Names qubits in diagram. Defaults to str.
transpose: Arranges qubit wires vertically instead of horizontally.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the diagram.
get_circuit_diagram_info: Gets circuit diagram info. Defaults to
protocol with fallback.
Returns:
The TextDiagramDrawer instance. |
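A usage sketch, assuming this is cirq's Circuit.to_text_diagram_drawer and that the returned TextDiagramDrawer exposes a render() method as in cirq:

import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(a), cirq.CNOT(a, b)])
drawer = circuit.to_text_diagram_drawer(transpose=False)
print(drawer.render())  # the same diagram print(circuit) would show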
def _generate_corpus_table(self, labels, ngrams):
"""Returns an HTML table containing data on each corpus' n-grams."""
html = []
for label in labels:
html.append(self._render_corpus_row(label, ngrams))
return '\n'.join(html) | Returns an HTML table containing data on each corpus' n-grams. |
def _prefix_from_ip_int(self, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
self._max_prefixlen)
prefixlen = self._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = self._max_prefixlen // 8
details = _int_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen | Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones |
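A worked example of the same computation for the netmask 255.255.255.0 (0xFFFFFF00): eight trailing zero bits give a prefix length of 24, and shifting the mask right by that amount must leave a block of contiguous ones.

ip_int = 0xFFFFFF00            # 255.255.255.0
max_prefixlen = 32
trailing_zeroes = (ip_int & -ip_int).bit_length() - 1     # 8
prefixlen = max_prefixlen - trailing_zeroes               # 24
assert ip_int >> trailing_zeroes == (1 << prefixlen) - 1  # contiguous ones
print(prefixlen)  # 24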
def pattern_for_view(self, view, action):
"""
Returns the URL pattern for the passed in action.
"""
# if this view knows how to define a URL pattern, call that
if getattr(view, 'derive_url_pattern', None):
return view.derive_url_pattern(self.path, action)
# otherwise take our best guess
else:
return r'^%s/%s/$' % (self.path, action) | Returns the URL pattern for the passed in action. |
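An illustration of the fallback branch with made-up path and action values: the generated pattern anchors both segments and requires a trailing slash.

import re

path, action = "channels", "read"       # hypothetical values
pattern = r'^%s/%s/$' % (path, action)
print(pattern)                                    # ^channels/read/$
print(bool(re.match(pattern, "channels/read/")))  # True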