Unnamed: 0 (int64) | code (string) | docstring (string) |
---|---|---|
1,500 | def handle_put_user(self, req):
account = req.path_info_pop()
user = req.path_info_pop()
key = unquote(req.headers.get('X-Auth-User-Key', ''))
key_hash = unquote(req.headers.get('X-Auth-User-Key-Hash', ''))
admin = req.headers.get('X-Auth-User-Admin') == 'true'
reseller_admin = \
req.headers.get('X-Auth-User-Reseller-Admin') == 'true'
if reseller_admin:
admin = True
if req.path_info or not account or account[0] == '.' or not user or \
user[0] == '.' or (not key and not key_hash):
return HTTPBadRequest(request=req)
if key_hash:
try:
swauth.authtypes.validate_creds(key_hash)
except ValueError:
return HTTPBadRequest(request=req)
user_arg = account + ':' + user
if reseller_admin:
if not self.is_super_admin(req) and\
not self.is_user_changing_own_key(req, user_arg):
return self.denied_response(req)
elif not self.is_account_admin(req, account) and\
not self.is_user_changing_own_key(req, user_arg):
return self.denied_response(req)
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp = self.make_pre_authed_request(
req.environ, 'HEAD', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not retrieve account id value: %s %s' %
(path, resp.status))
headers = {'X-Object-Meta-Account-Id':
resp.headers['x-container-meta-account-id']}
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
groups = ['%s:%s' % (account, user), account]
if admin:
groups.append('.admin')
if reseller_admin:
groups.append('.reseller_admin')
auth_value = key_hash or self.auth_encoder().encode(key)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path,
json.dumps({'auth': auth_value,
'groups': [{'name': g} for g in groups]}),
headers=headers).get_response(self.app)
if resp.status_int == 404:
return HTTPNotFound(request=req)
if resp.status_int // 100 != 2:
raise Exception('Could not create user object: %s %s' %
(path, resp.status))
return HTTPCreated(request=req) | Handles the PUT v2/<account>/<user> call for adding a user to an
account.
X-Auth-User-Key represents the user's key (url encoded),
- OR -
X-Auth-User-Key-Hash represents the user's hashed key (url encoded),
X-Auth-User-Admin may be set to `true` to create an account .admin, and
X-Auth-User-Reseller-Admin may be set to `true` to create a
.reseller_admin.
Creating users
**************
Can only be called by an account .admin unless the user is to be a
.reseller_admin, in which case the request must be made by the .super_admin.
Changing password/key
*********************
1) reseller_admin key can be changed by super_admin and by himself.
2) admin key can be changed by any admin in same account,
reseller_admin, super_admin and himself.
3) Regular user key can be changed by any admin in his account,
reseller_admin, super_admin and himself.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success. |
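A minimal standalone sketch (standard library only, hypothetical values) of the user record body this handler builds: the group list combines `account:user`, the account itself and the optional `.admin`/`.reseller_admin` flags, and the object body is JSON.

```python
import json

account, user, key_hash = "acct", "bob", "sha512:..."   # hypothetical values
admin, reseller_admin = True, False

groups = ["%s:%s" % (account, user), account]
if admin:
    groups.append(".admin")
if reseller_admin:
    groups.append(".reseller_admin")

# mirrors the JSON body PUT to the auth account above
body = json.dumps({"auth": key_hash, "groups": [{"name": g} for g in groups]})
print(body)
```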
1,501 | def asr_breaking(self, tol_eigendisplacements=1e-5):
for i in range(self.nb_qpoints):
if np.allclose(self.qpoints[i].frac_coords, (0, 0, 0)):
if self.has_eigendisplacements:
acoustic_modes_index = []
for j in range(self.nb_bands):
eig = self.eigendisplacements[j][i]
if np.max(np.abs(eig[1:] - eig[:1])) < tol_eigendisplacements:
acoustic_modes_index.append(j)
if len(acoustic_modes_index) != 3:
acoustic_modes_index = [0, 1, 2]
return self.bands[acoustic_modes_index, i]
else:
return self.bands[:3, i]
return None | Returns the breaking of the acoustic sum rule for the three acoustic modes,
if Gamma is present. None otherwise.
If eigendisplacements are available they are used to determine the acoustic
modes: selects the bands corresponding to the eigendisplacements that
correspond to a translation within tol_eigendisplacements. If these are not
identified or eigendisplacements are missing the first 3 modes will be used
(indices [0:3]). |
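A small numpy-only illustration of the acoustic-mode test used above: an eigendisplacement in which every atom carries the same vector is a rigid translation, so the maximum deviation from the first atom's displacement stays below the tolerance.

```python
import numpy as np

tol = 1e-5
translation = np.tile([1.0, 0.0, 0.0], (4, 1))           # 4 atoms, identical motion
optical = np.array([[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]])  # atoms move against each other

for eig in (translation, optical):
    print(np.max(np.abs(eig[1:] - eig[:1])) < tol)       # True, then False
```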
1,502 | def rename(self, old_fieldname, new_fieldname):
if old_fieldname not in self:
raise Exception("DataTable does not have field `%s`" %
old_fieldname)
if not isinstance(new_fieldname, basestring):
raise ValueError("DataTable fields must be strings, not `%s`" %
type(new_fieldname))
if old_fieldname == new_fieldname:
return
new_names = self.fields
location = new_names.index(old_fieldname)
del new_names[location]
new_names.insert(location, new_fieldname)
self.fields = new_names | Renames a specific field, and preserves the underlying order. |
1,503 | def _post_fork_init(self):
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
self.returners = salt.loader.returners(self.opts, {})
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
self.presence_events = True | Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked." |
1,504 | def price_diff(self):
res = self.price.groupby(level=1).apply(lambda x: x.diff(1))
res.name = 'price_diff'
return res | Returns the first-order difference of DataStruct.price. |
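A standalone pandas sketch of the same per-group first difference (hypothetical two-level date/code index); `groupby(level=1).diff(1)` is equivalent to the `apply` form used above.

```python
import pandas as pd

idx = pd.MultiIndex.from_product(
    [["2019-01-01", "2019-01-02", "2019-01-03"], ["000001", "600000"]],
    names=["date", "code"])
price = pd.Series([10.0, 20.0, 10.5, 21.0, 10.2, 20.5], index=idx, name="price")

res = price.groupby(level=1).diff(1)   # first difference within each code
res.name = "price_diff"
print(res)
```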
1,505 | def _offset_setup(self,sigangle,leading,deltaAngleTrack):
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None | The part of the setup related to calculating the stream/progenitor offset |
1,506 | def download(url, filename=None, print_progress=0, delete_fail=True,
**kwargs):
blocksize = 1024 * 1024
downloaded = 0
progress = None
log.info(, url)
response = open_url(url, **kwargs)
if not filename:
filename = os.path.basename(url)
output = None
try:
total = int(response.headers['Content-Length'])
if print_progress:
progress = ProgressBar(print_progress, total)
with tempfile.NamedTemporaryFile(
prefix=filename + , dir=, delete=False) as output:
while downloaded < total:
block = response.read(blocksize)
output.write(block)
downloaded += len(block)
if progress:
progress.update(downloaded)
os.rename(output.name, filename)
output = None
return filename
finally:
response.close()
if delete_fail and output:
os.unlink(output.name) | Download a file, optionally printing a simple progress bar
url: The URL to download
filename: The filename to save to, default is to use the URL basename
print_progress: The length of the progress bar, use 0 to disable
delete_fail: If True delete the file if the download was not successful,
default is to keep the temporary file
return: The downloaded filename |
1,507 | def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ):
if config_dir is None:
config_dir = get_config_dir()
tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir )
res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}
try:
my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir )
except:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load private key %s' % my_key_info['key_id']}
res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash private key'}
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'decryption ok':
log.debug("decrypt_file: %s" % res.__dict__)
return {'error': 'Failed to decrypt data'}
log.debug("decryption succeeded from keys in %s" % config_dir)
return {'status': True} | Decrypt a stream of data using key info
for a private key we own.
@my_key_info and @sender_key_info should be data returned by gpg_app_get_key
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True, 'sig': ...} on success
Return {'status': True} on success
Return {'error': ...} on error |
1,508 | def on_connect(client):
print "++ Opened connection to %s" % client.addrport()
broadcast('%s joins the conversation.\n' % client.addrport() )
CLIENT_LIST.append(client)
client.send("Welcome to the Chat Server, %s.\n" % client.addrport() ) | Sample on_connect function.
Handles new connections. |
1,509 | def shrank(self, block=None, percent_diff=0, abs_diff=1):
if block is None:
block = self.block
cur_nets = len(block.logic)
net_goal = self.prev_nets * (1 - percent_diff) - abs_diff
less_nets = (cur_nets <= net_goal)
self.prev_nets = cur_nets
return less_nets | Returns whether a block has less nets than before
:param Block block: block to check (if changed)
:param Number percent_diff: percentage difference threshold
:param int abs_diff: absolute difference threshold
:return: boolean
This function checks whether the change in the number of
nets is greater than the percentage and absolute difference
thresholds. |
1,510 | def dictToFile(dictionary,replicateKey,outFileName):
replicateToFile=h5py.File(outFileName,"w")
for i in range(len(dictionary[replicateKey])):
replicateToFile.create_dataset("{}".format(dictionary[replicateKey].keys()[i])\
,data=dictionary[replicateKey].values()[i]\
,compression="gzip")
replicateToFile.close() | Function to write dictionary data, from subsampleReplicates, to file an hdf5 file.
:param dictionary: nested dictionary returned by subsampleReplicates
:param replicateKey: string designating the replicate written to file
:param outFileName: string defining the hdf5 filename |
1,511 | def delete_model(self, meta: dict):
bucket = self.connect()
if bucket is None:
raise BackendRequiredError
blob_name = "models/%s/%s.asdf" % (meta["model"], meta["uuid"])
self._log.info(blob_name)
try:
self._log.info("Deleting model ...")
bucket.delete_blob(blob_name)
except NotFound:
self._log.warning("Model %s already deleted.", meta["uuid"]) | Delete the model from GCS. |
1,512 | def diffmap(adata, n_comps=15, copy=False):
if 'neighbors' not in adata.uns:
raise ValueError(
'You need to run `pp.neighbors` first to compute a neighborhood graph.')
if n_comps <= 2:
raise ValueError(
'Provide any value greater than 2 for `n_comps`.')
adata = adata.copy() if copy else adata
_diffmap(adata, n_comps=n_comps)
return adata if copy else None | Diffusion Maps [Coifman05]_ [Haghverdi15]_ [Wolf18]_.
Diffusion maps [Coifman05]_ has been proposed for visualizing single-cell
data by [Haghverdi15]_. The tool uses the adapted Gaussian kernel suggested
by [Haghverdi16]_ in the implementation of [Wolf18]_.
The width ("sigma") of the connectivity kernel is implicitly determined by
the number of neighbors used to compute the single-cell graph in
:func:`~scanpy.api.neighbors`. To reproduce the original implementation
using a Gaussian kernel, use `method=='gauss'` in
:func:`~scanpy.api.neighbors`. To use an exponential kernel, use the default
`method=='umap'`. Differences between these options shouldn't usually be
dramatic.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
n_comps : `int`, optional (default: 15)
The number of dimensions of the representation.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_diffmap** : :class:`numpy.ndarray` (`adata.obsm`)
Diffusion map representation of data, which is the right eigen basis of
the transition matrix with eigenvectors as columns.
**diffmap_evals** : :class:`numpy.ndarray` (`adata.uns`)
Array of size (number of eigen vectors). Eigenvalues of transition matrix. |
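A hedged usage sketch (assumes scanpy is installed; dataset and parameter values are illustrative): neighbors must be computed first because `diffmap` reads the neighborhood graph from `adata.uns`.

```python
import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()
sc.pp.neighbors(adata, n_neighbors=15)   # builds the single-cell graph
sc.tl.diffmap(adata, n_comps=15)
print(adata.obsm["X_diffmap"].shape)     # (n_cells, 15)
print(adata.uns["diffmap_evals"][:5])    # leading eigenvalues of the transition matrix
```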
1,513 | def _wait_until(obj, att, desired, callback, interval, attempts, verbose,
verbose_atts):
if not isinstance(desired, (list, tuple)):
desired = [desired]
if verbose_atts is None:
verbose_atts = []
if not isinstance(verbose_atts, (list, tuple)):
verbose_atts = [verbose_atts]
infinite = (attempts == 0)
attempt = 0
start = time.time()
while infinite or (attempt < attempts):
try:
obj.get()
except AttributeError:
try:
attval = getattr(obj, att)
if verbose:
elapsed = time.time() - start
msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (
att, attval, elapsed)]
for vatt in verbose_atts:
vattval = getattr(obj, vatt, None)
msgs.append("%s=%s" % (vatt, vattval))
print(" ".join(msgs))
if attval in desired:
return obj
time.sleep(interval)
attempt += 1
return obj | Loops until either the desired value of the attribute is reached, or the
number of attempts is exceeded. |
1,514 | def get_user_for_membersuite_entity(membersuite_entity):
user = None
user_created = False
user_username = generate_username(membersuite_entity)
try:
user = User.objects.get(username=user_username)
except User.DoesNotExist:
pass
if not user:
try:
user = User.objects.filter(
email=membersuite_entity.email_address)[0]
except IndexError:
pass
if not user:
user = User.objects.create(
username=user_username,
email=membersuite_entity.email_address,
first_name=membersuite_entity.first_name,
last_name=membersuite_entity.last_name)
user_created = True
return user, user_created | Returns a User for `membersuite_entity`.
membersuite_entity is any MemberSuite object that has the fields
membersuite_id, email_address, first_name, and last_name, e.g.,
PortalUser or Individual. |
1,515 | def _raise_error_if_not_of_type(arg, expected_type, arg_name=None):
display_name = "%s " % arg_name if arg_name is not None else "Argument "
lst_expected_type = [expected_type] if \
type(expected_type) == type else expected_type
err_msg = "%smust be of type %s " % (display_name,
' or '.join([x.__name__ for x in lst_expected_type]))
err_msg += "(not %s)." % type(arg).__name__
if not any(map(lambda x: isinstance(arg, x), lst_expected_type)):
raise TypeError(err_msg) | Check if the input is of expected type.
Parameters
----------
arg : Input argument.
expected_type : A type OR a list of types that the argument is expected
to be.
arg_name : The name of the variable in the function being used. No
name is assumed if set to None.
Examples
--------
_raise_error_if_not_of_type(sf, str, 'sf')
_raise_error_if_not_of_type(sf, [str, int], 'sf') |
1,516 | def _add_new_items(self, config, seen):
for (key, value) in self.items():
if key not in seen:
self._set_value(config, key, value) | Add new (unseen) items to the config. |
1,517 | def _check_series_localize_timestamps(s, timezone):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s | Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive |
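A standalone pandas illustration of the conversion above: tz-aware timestamps are rebased to the target timezone and then stripped of timezone information.

```python
import pandas as pd

s = pd.Series(pd.date_range("2018-03-10 12:00", periods=3, freq="H", tz="UTC"))
converted = s.dt.tz_convert("US/Eastern").dt.tz_localize(None)
print(s.dtype)           # datetime64[ns, UTC]
print(converted.dtype)   # datetime64[ns] -- naive wall-clock time in US/Eastern
```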
1,518 | def getStats(self):
recordStoreStats = self._recordStore.getStats()
streamStats = dict()
for (key, values) in recordStoreStats.items():
fieldStats = dict(zip(self._recordStoreFieldNames, values))
streamValues = []
for name in self._streamFieldNames:
streamValues.append(fieldStats[name])
streamStats[key] = streamValues
return streamStats | TODO: This method needs to be enhanced to get the stats on the *aggregated*
records.
:returns: stats (like min and max values of the fields). |
1,519 | def _process_state_embryo(self, job_record):
start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
end_timeperiod, job_record) | method that takes care of processing job records in STATE_EMBRYO state |
1,520 | def channels(self):
try:
return self._channels
except AttributeError:
logger.debug("initialize output channels ...")
channels = self.args.channels
config_channels = [sec.rpartition()[0] for sec in self.config.sections(suffix=)]
unknown = set(channels) - set(config_channels)
if unknown:
raise ValueError("undefined channel %r" % list(unknown))
output_channels = []
for channel in set(channels):
channel_type = self.config.get('%s_channel' % channel, 'type')
if channel_type == 'tty':
output_channels.append(TermChannel(channel, self.args, self.config))
elif channel_type == 'file':
output_channels.append(FileChannel(channel, self.args, self.config))
elif channel_type == 'mail':
output_channels.append(MailChannel(channel, self.args, self.config))
else:
raise LogRaptorConfigError('unknown channel type %r' % channel_type)
return output_channels | Output channels |
1,521 | def _set_auth_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=auth_type.auth_type, is_container=, presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__auth_type = t
if hasattr(self, ):
self._set() | Setter method for auth_type, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_auth_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_auth_type() directly.
YANG Description: Authentication type |
1,522 | def _parallel_exec(self, hosts):
if not hosts:
return
p = multiprocessing.Pool(self.forks)
results = []
results = p.map_async(multiprocessing_runner, hosts).get(9999999)
p.close()
p.join()
return results | handles mulitprocessing when more than 1 fork is required |
1,523 | def _proc_gnusparse_01(self, next, pax_headers):
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2])) | Process a GNU tar extended sparse header, version 0.1. |
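A worked example of the version-0.1 sparse map format parsed above: the `GNU.sparse.map` value is a flat `offset,size,offset,size,...` list that is paired up into (offset, size) tuples.

```python
value = "0,512,4096,1024"                      # hypothetical header value
sparse = [int(x) for x in value.split(",")]
print(list(zip(sparse[::2], sparse[1::2])))    # [(0, 512), (4096, 1024)]
```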
1,524 | def pin_ls(self, type="all", **kwargs):
kwargs.setdefault("opts", {"type": type})
return self._client.request(, decoder=, **kwargs) | Lists objects pinned to local storage.
By default, all pinned objects are returned, but the ``type`` flag or
arguments can restrict that to a specific pin type or to some specific
objects respectively.
.. code-block:: python
>>> c.pin_ls()
{'Keys': {
'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'},
'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'},
'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'},
…
'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'}}}
Parameters
----------
type : "str"
The type of pinned keys to list. Can be:
* ``"direct"``
* ``"indirect"``
* ``"recursive"``
* ``"all"``
Returns
-------
dict : Hashes of pinned IPFS objects and why they are pinned |
1,525 | def configure_logging(args):
log_format = logging.Formatter()
log_level = logging.INFO if args.verbose else logging.WARN
log_level = logging.DEBUG if args.debug else log_level
console = logging.StreamHandler()
console.setFormatter(log_format)
console.setLevel(log_level)
root_logger = logging.getLogger()
if len(root_logger.handlers) == 0:
root_logger.addHandler(console)
root_logger.setLevel(log_level)
root_logger.handlers[0].setFormatter(log_format)
logging.getLogger(__name__) | Logging to console |
1,526 | def copy(self):
"Return a clone of this hash object."
other = _ChainedHashAlgorithm(self._algorithms)
other._hobj = deepcopy(self._hobj)
other._fobj = deepcopy(self._fobj)
return other | Return a clone of this hash object. |
1,527 | def token(self):
" Get token when needed."
if hasattr(self, ):
return getattr(self, )
data = json.dumps({: self.customer,
: self.username,
: self.password})
response = requests.post(
, data=data,
headers={: })
content = json.loads(response.content)
if response.status_code != 200:
if self.check_error(content, , ):
raise self.CredentialsError(
self.response_message(content, ))
raise self.Failure(self.response_message(content, ),
)
if in content and in content[]:
token = content[][]
else:
raise self.AuthenticationError(response)
setattr(self, , token)
return token | Get token when needed. |
1,528 | def get_grade_systems_by_genus_type(self, grade_system_genus_type):
collection = JSONClientValidated(,
collection=,
runtime=self._runtime)
result = collection.find(
dict({: str(grade_system_genus_type)},
**self._view_filter())).sort(, DESCENDING)
return objects.GradeSystemList(result, runtime=self._runtime, proxy=self._proxy) | Gets a ``GradeSystemList`` corresponding to the given grade system genus ``Type`` which does not include systems of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known systems or
an error results. Otherwise, the returned list may contain only
those systems that are accessible through this session.
arg: grade_system_genus_type (osid.type.Type): a grade system
genus type
return: (osid.grading.GradeSystemList) - the returned
``GradeSystem`` list
raise: NullArgument - ``grade_system_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
1,529 | def list_logs(args, container_name=None):
from sregistry.main import Client as cli
if len(args.commands) > 0:
container_name = args.commands.pop(0)
cli.logs(container_name)
sys.exit(0) | list a specific log for a builder, or the latest log if none provided
Parameters
==========
args: the argparse object to look for a container name
container_name: a default container name set to be None (show latest log) |
1,530 | def print_commands(self, out=sys.stdout):
cmds = self.list_commands()
for ck in cmds:
if ck.printable:
out.write( % ck) | utility method to print commands
and descriptions for @BotFather |
1,531 | def build_common_all_meta_df(common_meta_dfs, fields_to_remove, remove_all_metadata_fields):
if remove_all_metadata_fields:
trimmed_common_meta_dfs = [pd.DataFrame(index=df.index) for df in common_meta_dfs]
else:
shared_column_headers = sorted(set.intersection(*[set(df.columns) for df in common_meta_dfs]))
logger.debug("shared_column_headers: {}".format(shared_column_headers))
trimmed_common_meta_dfs = [df[shared_column_headers] for df in common_meta_dfs]
for df in trimmed_common_meta_dfs:
df.drop(fields_to_remove, axis=1, errors="ignore", inplace=True)
all_meta_df_with_dups = pd.concat(trimmed_common_meta_dfs, axis=0)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df_with_dups.columns: {}".format(all_meta_df_with_dups.columns))
logger.debug("all_meta_df_with_dups.index: {}".format(all_meta_df_with_dups.index))
if all_meta_df_with_dups.empty:
all_meta_df = pd.DataFrame(index=all_meta_df_with_dups.index.unique())
else:
all_meta_df_with_dups["concat_column_for_index"] = all_meta_df_with_dups.index
all_meta_df = all_meta_df_with_dups.copy(deep=True).drop_duplicates()
all_meta_df.drop("concat_column_for_index", axis=1, inplace=True)
all_meta_df_with_dups.drop("concat_column_for_index", axis=1, inplace=True)
logger.debug("all_meta_df_with_dups.shape: {}".format(all_meta_df_with_dups.shape))
logger.debug("all_meta_df.shape: {}".format(all_meta_df.shape))
return (all_meta_df, all_meta_df_with_dups) | concatenate the entries in common_meta_dfs, removing columns selectively (fields_to_remove) or entirely (
remove_all_metadata_fields=True; in this case, effectively just merges all the indexes in common_meta_dfs).
Returns 2 dataframes (in a tuple): the first has duplicates removed, the second does not.
Args:
common_meta_dfs: collection of pandas DataFrames containing the metadata in the "common" direction of the
concatenation operation
fields_to_remove: columns to be removed (if present) from the common_meta_dfs
remove_all_metadata_fields: boolean indicating that all metadata fields should be removed from the
common_meta_dfs; overrides fields_to_remove if present
Returns:
tuple containing
all_meta_df: pandas dataframe that is the concatenation of the dataframes in common_meta_dfs,
all_meta_df_with_dups: |
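A small standalone sketch of the trimming/deduplication above, on toy metadata frames: keep only the shared column headers, concatenate along the index, and drop rows that duplicate both index and values.

```python
import pandas as pd

df_a = pd.DataFrame({"pr_gene_symbol": ["TP53", "EGFR"], "extra_a": [1, 2]}, index=["r1", "r2"])
df_b = pd.DataFrame({"pr_gene_symbol": ["TP53", "MYC"], "extra_b": [3, 4]}, index=["r1", "r3"])

shared = sorted(set.intersection(*[set(df.columns) for df in (df_a, df_b)]))
trimmed = [df[shared] for df in (df_a, df_b)]
with_dups = pd.concat(trimmed, axis=0)

with_dups["__row_id"] = with_dups.index               # let the index participate in dedup
deduped = with_dups.drop_duplicates().drop("__row_id", axis=1)
with_dups = with_dups.drop("__row_id", axis=1)

print(shared)     # ['pr_gene_symbol']
print(deduped)    # the duplicate r1 row appears only once
```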
1,532 | def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed()
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=[, ], tablefmt=))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=[, , ], tablefmt=))
print() | Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace) |
1,533 | def collect(nested_nodes, transform=None):
items = []
if transform is None:
transform = lambda node, parents, nodes, *args: node
def __collect__(node, parents, nodes, first, last, depth):
items.append(transform(node, parents, nodes, first, last, depth))
apply_depth_first(nested_nodes, __collect__)
return items | Return list containing the result of the `transform` function applied to
each item in the supplied list of nested nodes.
A custom transform function may be applied to each entry during the
flattening by specifying a function through the `transform` keyword
argument. The `transform` function will be passed the following arguments:
- `node`: The node/key of the entry.
- `parents`: The node/key of the parents as a `list`.
- `nodes`: The children of the entry.
By default, the `transform` function simply returns the node/key, resulting
in a flattened version of the original nested nodes structure. |
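A self-contained sketch of the flattening idea, with a tiny depth-first walker standing in for `apply_depth_first` and a simplified `(node, parents, children)` transform signature (hypothetical tree structure of `(name, children)` tuples).

```python
def walk(nodes, visit, parents=()):
    for name, children in nodes:
        visit(name, list(parents), children)
        walk(children, visit, parents + (name,))

def collect_flat(nested, transform=None):
    if transform is None:
        transform = lambda node, parents, children: node
    items = []
    walk(nested, lambda *args: items.append(transform(*args)))
    return items

tree = [("a", [("b", []), ("c", [("d", [])])])]
print(collect_flat(tree))                                        # ['a', 'b', 'c', 'd']
print(collect_flat(tree, lambda n, p, c: "/".join(p + [n])))     # ['a', 'a/b', 'a/c', 'a/c/d']
```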
1,534 | def task_done(self, **kw):
def validate(task):
if not Status.is_pending(task['status']):
raise ValueError("Task is not pending.")
return self._task_change_status(Status.COMPLETED, validate, **kw) | Marks a pending task as done, optionally specifying a completion
date with the 'end' argument. |
1,535 | def comment(self, text):
url = self._imgur._base_url + "/3/comment"
payload = {: self.id, : text}
resp = self._imgur._send_request(url, params=payload, needs_auth=True,
method='POST')
return Comment(resp, imgur=self._imgur, has_fetched=False) | Make a top-level comment to this.
:param text: The comment text. |
1,536 | def get_imports(self, option):
if option:
if len(option) == 1 and option[0].isupper() and len(option[0]) > 3:
return getattr(settings, option[0])
else:
codes = [e for e in option if e.isupper() and len(e) == 3]
if len(codes) != len(option):
raise ImproperlyConfigured("Invalid currency codes found: %s" % codes)
return codes
for attr in ('CURRENCIES', 'SHOP_CURRENCIES'):
try:
return getattr(settings, attr)
except AttributeError:
continue
return option | See if we have been passed a set of currencies or a setting variable
or look for settings CURRENCIES or SHOP_CURRENCIES. |
1,537 | def attach_volume(self, xml_bytes):
root = XML(xml_bytes)
status = root.findtext("status")
attach_time = root.findtext("attachTime")
attach_time = datetime.strptime(
attach_time[:19], "%Y-%m-%dT%H:%M:%S")
return {"status": status, "attach_time": attach_time} | Parse the XML returned by the C{AttachVolume} function.
@param xml_bytes: XML bytes with a C{AttachVolumeResponse} root
element.
@return: a C{dict} with status and attach_time keys.
TODO: volumeId, instanceId, device |
1,538 | def to_xml(self):
if self.alias_hosted_zone_id != None and self.alias_dns_name != None:
body = self.AliasBody % (self.alias_hosted_zone_id, self.alias_dns_name)
else:
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier != None and self.weight != None:
weight = self.WRRBody % {"identifier": self.identifier, "weight":
self.weight}
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
}
return self.XMLBody % params | Spit this resource record set out as XML |
1,539 | def register_dimensions(self, dims):
if isinstance(dims, collections.Mapping):
dims = dims.itervalues()
for dim in dims:
self.register_dimension(dim.name, dim) | Register multiple dimensions on the cube.
.. code-block:: python
cube.register_dimensions([
{'name' : 'ntime', 'global_size' : 10,
'lower_extent' : 2, 'upper_extent' : 7 },
{'name' : 'na', 'global_size' : 3,
'lower_extent' : 2, 'upper_extent' : 7 },
])
Parameters
----------
dims : list or dict
A list or dictionary of dimensions |
def from_unidiff(cls, diff: str) -> 'Patch':
lines = diff.split('\n')
file_patches = []
while lines:
if lines[0] == '' or lines[0].isspace():
lines.pop(0)
continue
file_patches.append(FilePatch._read_next(lines))
return Patch(file_patches) | Constructs a Patch from a provided unified format diff. |
1,541 | def restore_review_history_for_affected_objects(portal):
logger.info("Restoring review_history ...")
query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS)
brains = api.search(query, UID_CATALOG)
total = len(brains)
done = 0
for num, brain in enumerate(brains):
if num % 100 == 0:
logger.info("Restoring review_history: {}/{}"
.format(num, total))
review_history = api.get_review_history(brain, rev=False)
if review_history:
continue
restore_review_history_for(brain)
done += 1
if done % 1000 == 0:
commit_transaction(portal)
logger.info("Restoring review history: {} processed [DONE]".format(done)) | Applies the review history for objects that are bound to new senaite_*
workflows |
1,542 | def LOS_CrossProj(VType, Ds, us, kPIns, kPOuts, kRMins,
Lplot=, proj=, multi=False):
assert type(VType) is str and VType.lower() in [,]
assert Lplot.lower() in [,]
assert type(proj) is str
proj = proj.lower()
assert proj in [,,,]
assert Ds.ndim==2 and Ds.shape==us.shape
nL = Ds.shape[1]
k0 = kPIns if Lplot.lower()== else np.zeros((nL,))
if VType.lower()== and proj in [,]:
CrossProjAng = np.arccos(np.sqrt(us[0,:]**2+us[1,:]**2)
/np.sqrt(np.sum(us**2,axis=0)))
nkp = np.ceil(25.*(1 - (CrossProjAng/(np.pi/4)-1)**2) + 2)
ks = np.max([kRMins,kPIns],axis=0) if Lplot.lower()== else kRMins
pts0 = []
if multi:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts0.append( np.array([[np.nan,np.nan],
[np.nan,np.nan]]) )
else:
k = np.linspace(k0[ii],kPOuts[ii],nkp[ii],endpoint=True)
k = np.unique(np.append(k,ks[ii]))
pp = Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1]
pts0.append( np.array([np.hypot(pp[0,:],pp[1,:]),pp[2,:]]) )
else:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts0.append(np.array([[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan]]))
else:
k = np.linspace(k0[ii],kPOuts[ii],nkp[ii],endpoint=True)
k = np.append(np.unique(np.append(k,ks[ii])),np.nan)
pts0.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
pts0 = np.concatenate(tuple(pts0),axis=1)
pts0 = np.array([np.hypot(pts0[0,:],pts0[1,:]),pts0[2,:]])
if not (VType.lower()== and proj==):
pts = []
if multi:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts.append( np.array([[np.nan,np.nan],
[np.nan,np.nan],
[np.nan,np.nan]]) )
else:
k = np.array([k0[ii],kPOuts[ii]])
pts.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
else:
for ii in range(0,nL):
if np.isnan(kPOuts[ii]):
pts.append(np.array([[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan],
[np.nan,np.nan,np.nan]]))
else:
k = np.array([k0[ii],kPOuts[ii],np.nan])
pts.append( Ds[:,ii:ii+1] + k[np.newaxis,:]*us[:,ii:ii+1] )
pts = np.concatenate(tuple(pts),axis=1)
if proj==:
pts = [pp[:2,:] for pp in pts] if multi else pts[:2,:]
elif proj==:
if VType.lower()==:
pts = pts0
else:
pts = [pp[1:,:] for pp in pts] if multi else pts[1:,:]
elif proj==:
if multi:
if VType.lower()==:
pts = [(p0,pp[:2,:]) for (p0,pp) in zip(*[pts0,pts])]
else:
pts = (pts[1:,:],pts[:2,:])
else:
pts = (pts0,pts[:2,:]) if VType.lower()== else (pts[1:,:],pts[:2,:])
return pts | Compute the parameters to plot the poloidal projection of the LOS |
1,543 | def get_cfgdict_list_subset(cfgdict_list, keys):
import utool as ut
cfgdict_sublist_ = [ut.dict_subset(cfgdict, keys) for cfgdict in cfgdict_list]
cfgtups_sublist_ = [tuple(ut.dict_to_keyvals(cfgdict)) for cfgdict in cfgdict_sublist_]
cfgtups_sublist = ut.unique_ordered(cfgtups_sublist_)
cfgdict_sublist = list(map(dict, cfgtups_sublist))
return cfgdict_sublist | r"""
returns list of unique dictionaries only with keys specified in keys
Args:
cfgdict_list (list):
keys (list):
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --test-get_cfgdict_list_subset
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> cfgdict_list = [
... {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
>>> keys = ['K', 'dcvs_clip_max']
>>> # execute function
>>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
>>> # verify results
>>> result = ut.repr4(cfgdict_sublist)
>>> print(result)
[
{'K': 3, 'dcvs_clip_max': 0.1},
{'K': 5, 'dcvs_clip_max': 0.1},
{'K': 3, 'dcvs_clip_max': 0.2},
{'K': 5, 'dcvs_clip_max': 0.2},
] |
1,544 | def diropenbox(msg=None
, title=None
, default=None
):
if sys.platform == 'darwin':
_bring_to_front()
title=getFileDialogTitle(msg,title)
localRoot = Tk()
localRoot.withdraw()
if not default: default = None
f = tk_FileDialog.askdirectory(
parent=localRoot
, title=title
, initialdir=default
, initialfile=None
)
localRoot.destroy()
if not f: return None
return os.path.normpath(f) | A dialog to get a directory name.
Note that the msg argument, if specified, is ignored.
Returns the name of a directory, or None if user chose to cancel.
If the "default" argument specifies a directory name, and that
directory exists, then the dialog box will start with that directory. |
1,545 | def dumps(obj, big_endian=True):
geom_type = obj['type']
meta = obj.get('meta', {})
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
return exporter(obj, big_endian, meta) | Dump a GeoJSON-like `dict` to a WKB string.
.. note::
The dimensions of the generated WKB will be inferred from the first
vertex in the GeoJSON `coordinates`. It will be assumed that all
vertices are uniform. There are 4 types:
- 2D (X, Y): 2-dimensional geometry
- Z (X, Y, Z): 3-dimensional geometry
- M (X, Y, M): 2-dimensional geometry with a "Measure"
- ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure"
If the first vertex contains 2 values, we assume a 2D geometry.
If the first vertex contains 3 values, this is slightly ambiguous and
so the most common case is chosen: Z.
If the first vertex contains 4 values, we assume a ZM geometry.
The WKT/WKB standards provide a way of differentiating normal (2D), Z,
M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text),
but the GeoJSON spec does not. Therefore, for the sake of interface
simplicity, we assume that geometry that looks 3D contains XYZ
components, instead of XYM.
If the coordinates list has no coordinate values (this includes nested
lists, for example, `[[[[],[]], []]]`, the geometry is considered to be
empty. Geometries, with the exception of points, have a reasonable
"empty" representation in WKB; however, without knowing the number of
coordinate values per vertex, the type is ambigious, and thus we don't
know if the geometry type is 2D, Z, M, or ZM. Therefore in this case
we expect a `ValueError` to be raised.
:param dict obj:
GeoJson-like `dict` object.
:param bool big_endian:
Defaults to `True`. If `True`, data values in the generated WKB will
be represented using big endian byte order. Else, little endian.
TODO: remove this
:param str dims:
Indicates to WKB representation desired from converting the given
GeoJSON `dict` ``obj``. The accepted values are:
* '2D': 2-dimensional geometry (X, Y)
* 'Z': 3-dimensional geometry (X, Y, Z)
* 'M': 3-dimensional geometry (X, Y, M)
* 'ZM': 4-dimensional geometry (X, Y, Z, M)
:returns:
A WKB binary string representation of the ``obj``. |
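A hedged usage sketch of the `dumps` defined above (geomet-style WKB): a 2D point is inferred from the two coordinate values of the first vertex.

```python
import binascii

point = {"type": "Point", "coordinates": [1.0, 2.0]}
wkb = dumps(point)                # big-endian WKB by default
print(binascii.hexlify(wkb))      # byte-order flag, geometry type, then two doubles
```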
def download(url, path, kind='file',
progressbar=True, replace=False, timeout=10., verbose=True):
if kind not in ALLOWED_KINDS:
raise ValueError('`kind` must be one of {}, got {}'.format(
ALLOWED_KINDS, kind))
path = op.expanduser(path)
if len(path) == 0:
raise ValueError()
download_url = _convert_url_to_downloadable(url)
if replace is False and op.exists(path):
msg = (
)
elif kind in ZIP_KINDS:
if not op.isdir(path):
if verbose:
tqdm.write()
os.makedirs(path)
path_temp = _TempDir()
path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
_fetch_file(download_url, path_temp_file, timeout=timeout,
verbose=verbose)
if verbose:
tqdm.write(.format(kind))
if kind == 'zip':
zipper = ZipFile
elif kind == 'tar':
zipper = tarfile.open
elif kind == 'tar.gz':
zipper = partial(tarfile.open, mode='r:gz')
with zipper(path_temp_file) as myobj:
myobj.extractall(path)
msg = .format(path)
else:
if not op.isdir(op.dirname(path)):
os.makedirs(op.dirname(path))
_fetch_file(download_url, path, timeout=timeout, verbose=verbose)
msg = .format(path)
if verbose:
tqdm.write(msg)
return path | Download a URL.
This will download a file and store it in a `~/data/` folder,
creating directories if need be. It will also work for zip
files, in which case it will unzip all of the files to the
desired location.
Parameters
----------
url : string
The url of the file to download. This may be a dropbox
or google drive "share link", or a regular URL. If it
is a share link, then it should point to a single file and
not a folder. To download folders, zip them first.
path : string
The path where the downloaded file will be stored. If ``zipfile``
is True, then this must be a folder into which files will be zipped.
kind : one of ['file', 'zip', 'tar', 'tar.gz']
The kind of file to be downloaded. If not 'file', then the file
contents will be unpackaged according to the kind specified. Package
contents will be placed in ``root_destination/<name>``.
progressbar : bool
Whether to display a progress bar during file download.
replace : bool
If True and the URL points to a single file, overwrite the
old file if possible.
timeout : float
The URL open timeout.
verbose : bool
Whether to print download status to the screen.
Returns
-------
out_path : string
A path to the downloaded file (or folder, in the case of
a zip file). |
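A hedged usage sketch of the `download` helper above (URLs and paths are hypothetical): a plain file is saved to an exact filename, while an archive is unpacked into a folder.

```python
# single file, overwrite if it already exists
download("https://example.com/files/report.csv", "~/data/report.csv",
         kind="file", replace=True)

# zip archive, extracted into the target folder
download("https://example.com/files/archive.zip", "~/data/archive",
         kind="zip", progressbar=False)
```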
1,547 | def find_by_id(self, team, params={}, **options):
path = "/teams/%s" % (team)
return self.client.get(path, params, **options) | Returns the full record for a single team.
Parameters
----------
team : {Id} Globally unique identifier for the team.
[params] : {Object} Parameters for the request |
1,548 | def size(self):
return len(self._query_compiler.index) * len(self._query_compiler.columns) | Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame. |
1,549 | def __value_compare(self, target):
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False | Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean |
1,550 | def generate(organization, package, destination):
gen = ResourceGenerator(organization, package)
tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
try:
tmp.write(gen.conf())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))
tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
try:
tmp.write(gen.makefile())
finally:
tmp.close()
shutil.copy(tmp.name, os.path.join(destination, 'Makefile')) | Generates the Sphinx configuration and Makefile.
Args:
organization (str): the organization name.
package (str): the package to be documented.
destination (str): the destination directory. |
1,551 | def get_random_service(
service_registry: ServiceRegistry,
block_identifier: BlockSpecification,
) -> Tuple[Optional[str], Optional[str]]:
count = service_registry.service_count(block_identifier=block_identifier)
if count == 0:
return None, None
index = random.SystemRandom().randint(0, count - 1)
address = service_registry.get_service_address(
block_identifier=block_identifier,
index=index,
)
assert address, 'address must not be None'
url = service_registry.get_service_url(
block_identifier=block_identifier,
service_hex_address=address,
)
return url, address | Selects a random PFS from service_registry.
Returns a tuple of the chosen services url and eth address.
If there are no PFS in the given registry, it returns (None, None). |
1,552 | def sm_dict2lha(d):
blocks = OrderedDict([
(, {: [[1, d[].real], [2, d[].real], [3, d[].real]]}),
(, {: [[1, d[].real], [2, d[].real]]}),
(, {: matrix2lha(d[].real)}),
(, {: matrix2lha(d[].imag)}),
(, {: matrix2lha(d[].real)}),
(, {: matrix2lha(d[].imag)}),
(, {: matrix2lha(d[].real)}),
(, {: matrix2lha(d[].imag)}),
(, {: [[1, d[].real], [2, d[].real], [3, d[].real]]}),
])
return {: blocks} | Convert a a dictionary of SM parameters into
a dictionary that pylha can convert into a DSixTools SM output file. |
1,553 | def flush(self, timeout=60):
if timeout <= 0:
raise ErrBadTimeout
if self.is_closed:
raise ErrConnectionClosed
future = asyncio.Future(loop=self._loop)
try:
yield from self._send_ping(future)
yield from asyncio.wait_for(future, timeout, loop=self._loop)
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout | Sends a ping to the server expecting a pong back ensuring
what we have written so far has made it to the server and
also enabling measuring of roundtrip time.
In case a pong is not returned within the allowed timeout,
then it will raise ErrTimeout. |
1,554 | def coerce(cls, key, value):
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
return Mutable.coerce(key, value)
else:
return value | Convert plain dictionaries to MutationDict. |
1,555 | def open(self):
if self._table_exists():
self.mode = "open"
self._get_table_info()
return self
else:
raise IOError,"Table %s doesn't exist" %self.name | Open an existing database |
1,556 | def check_all_permissions(sender, **kwargs):
if not is_permissions_app(sender):
return
config = getattr(settings, 'PERMISSIONS', dict())
for natural_key, permissions in config.items():
if natural_key == :
for content_type in ContentType.objects.all():
for permission in permissions:
add_permission(content_type, permission)
else:
app, model = natural_key.split('.')
try:
content_type = ContentType.objects.get_by_natural_key(app, model)
except ContentType.DoesNotExist:
continue
for permission in permissions:
add_permission(content_type, permission) | This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exist. |
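A hedged sketch of the `PERMISSIONS` setting this hook consumes (app, model and permission names are hypothetical): keys are `app.model` natural keys and values are lists of permission codenames to guarantee on that content type.

```python
# settings.py (illustrative)
PERMISSIONS = {
    "blog.post": ["view_post", "publish_post"],
    "blog.comment": ["moderate_comment"],
}
```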
1,557 | def _run_operation_with_response(self, operation, unpack_res,
exhaust=False, address=None):
if operation.exhaust_mgr:
server = self._select_server(
operation.read_preference, operation.session, address=address)
with self._reset_on_error(server.description.address,
operation.session):
return server.run_operation_with_response(
operation.exhaust_mgr.sock,
operation,
True,
self._event_listeners,
exhaust,
unpack_res)
def _cmd(session, server, sock_info, slave_ok):
return server.run_operation_with_response(
sock_info,
operation,
slave_ok,
self._event_listeners,
exhaust,
unpack_res)
return self._retryable_read(
_cmd, operation.read_preference, operation.session,
address=address,
retryable=isinstance(operation, message._Query),
exhaust=exhaust) | Run a _Query/_GetMore operation and return a Response.
:Parameters:
- `operation`: a _Query or _GetMore object.
- `unpack_res`: A callable that decodes the wire protocol response.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
- `address` (optional): Optional address when sending a message
to a specific server, used for getMore. |
1,558 | def mach2cas(M, h):
tas = mach2tas(M, h)
cas = tas2cas(tas, h)
return cas | Mach to CAS conversion |
1,559 | def reset_db():
logger = get_logger(PROCESS_SCHEDULER)
logger.info()
ds = ds_manager.ds_factory(logger)
ds._db_client.drop_database(settings.settings[])
logger.info()
connection = ds.connection(COLLECTION_MANAGED_PROCESS)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_FREERUN_PROCESS)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (ENTRY_NAME, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_UNIT_OF_WORK)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING),
(TIMEPERIOD, pymongo.ASCENDING),
(START_ID, pymongo.ASCENDING),
(END_ID, pymongo.ASCENDING)], unique=True)
connection = ds.connection(COLLECTION_LOG_RECORDING)
connection.create_index([(PARENT_OBJECT_ID, pymongo.ASCENDING)], unique=True)
ttl_seconds = settings.settings[] * 86400
connection.create_index(CREATED_AT, expireAfterSeconds=ttl_seconds)
for collection_name in [COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY,
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY]:
connection = ds.connection(collection_name)
connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (TIMEPERIOD, pymongo.ASCENDING)], unique=True)
db_manager.reset_db()
logger.info() | drops the *scheduler* database, resets schema |
1,560 | def _set_exp_traffic_class(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("exp_traffic_class_map_name",exp_traffic_class.exp_traffic_class, yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: u}}), is_container=, yang_name="exp-traffic-class", rest_name="exp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__exp_traffic_class = t
if hasattr(self, ):
self._set() | Setter method for exp_traffic_class, mapped from YANG variable /qos_mpls/map/exp_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_exp_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_exp_traffic_class() directly. |
1,561 | def resource_type_type(loader):
def impl(string):
t_resources = loader.get_models()
if set(string) - set("sco"):
raise ValueError
return t_resources(_str=''.join(set(string)))
return impl | Returns a function which validates that resource types string contains only a combination of service,
container, and object. Their shorthand representations are s, c, and o. |
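A standalone sketch of the shorthand check above: only the characters `s` (service), `c` (container) and `o` (object) are accepted, and duplicates collapse into a set.

```python
def validate_resource_types(string):
    if set(string) - set("sco"):
        raise ValueError("resource types may only contain 's', 'c' and 'o'")
    return "".join(set(string))

print(sorted(validate_resource_types("ccso")))   # ['c', 'o', 's']
```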
1,562 | def textContent(self, text: str) -> None:
if self._inner_element:
self._inner_element.textContent = text
else:
super().textContent = text | Set text content to inner node. |
1,563 | def select_window(pymux, variables):
window_id = variables[]
def invalid_window():
raise CommandException( % window_id)
if window_id.startswith(':'):
try:
number = int(window_id[1:])
except ValueError:
invalid_window()
else:
w = pymux.arrangement.get_window_by_index(number)
if w:
pymux.arrangement.set_active_window(w)
else:
invalid_window()
else:
invalid_window() | Select a window. E.g: select-window -t :3 |
1,564 | def save(self, filename, clobber=True, **kwargs):
if os.path.exists(filename) and not clobber:
raise IOError("filename exists and we have been asked not to"\
" clobber it".format(filename))
if not filename.endswith('.fits'):
data = np.hstack([
self.disp.reshape(-1, 1),
self.flux.reshape(-1, 1),
self.variance.reshape(-1, 1)
])
return np.savetxt(filename, data, **kwargs)
else:
disp = fits.Column(name=, format=, array=self.disp)
flux = fits.Column(name=, format=, array=self.flux)
var = fits.Column(name=, format=, array=self.variance)
table_hdu = fits.new_table([disp, flux, var])
hdu = fits.PrimaryHDU()
for key, value in self.headers.iteritems():
if len(key) > 8:
hdu.header.update(.format(key), value)
try:
hdu.header.update(key, value)
except ValueError:
logger.warn("Could not save header key/value combination: "\
"{0} = {1}".format(key, value))
hdulist = fits.HDUList([hdu, table_hdu])
return hdulist.writeto(filename, clobber=clobber, **kwargs) | Save the `Spectrum1D` object to the specified filename.
:param filename:
The filename to save the Spectrum1D object to.
:type filename:
str
:param clobber: [optional]
Whether to overwrite the `filename` if it already exists.
:type clobber:
bool
:raises IOError:
If the filename exists and we were not asked to clobber it. |
1,565 | def SETB(cpu, dest):
dest.write(Operators.ITEBV(dest.size, cpu.CF, 1, 0)) | Sets byte if below.
:param cpu: current CPU.
:param dest: destination operand. |
1,566 | def clear_threads(self):
for aThread in compat.itervalues(self.__threadDict):
aThread.clear()
self.__threadDict = dict() | Clears the threads snapshot. |
1,567 | def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws):
lines = get_lines(data_nts, prtfmt, nt_fields, **kws)
if lines:
for line in lines:
prt.write(line)
else:
sys.stdout.write(" 0 items. NOT WRITING\n") | Print list of namedtuples into a table using prtfmt. |
1,568 | def cmServiceRequest(PriorityLevel_presence=0):
a = TpPd(pd=0x5)
b = MessageType(mesType=0x24)
c = CmServiceTypeAndCiphKeySeqNr()
e = MobileStationClassmark2()
f = MobileId()
packet = a / b / c / e / f
if PriorityLevel_presence is 1:
g = PriorityLevelHdr(ieiPL=0x8, eightBitPL=0x0)
packet = packet / g
return packet | CM SERVICE REQUEST Section 9.2.9 |
1,569 | def validNormalizeAttributeValue(self, doc, name, value):
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidNormalizeAttributeValue(doc__o, self._o, name, value)
return ret | Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. |
1,570 | def has_equal_value(state, ordered=False, ndigits=None, incorrect_msg=None):
if not hasattr(state, "parent"):
raise ValueError(
"You can only use has_equal_value() on the state resulting from check_column, check_row or check_result."
)
if incorrect_msg is None:
incorrect_msg = "Column `{{col}}` seems to be incorrect.{{' Make sure you arranged the rows correctly.' if ordered else ''}}"
has_nrows(state)
if not ordered:
stu_res, sol_res = sort_rows(state)
else:
stu_res = state.student_result
sol_res = state.solution_result
for sol_col_name, sol_col_vals in sol_res.items():
stu_col_vals = stu_res[sol_col_name]
if ndigits is not None:
try:
sol_col_vals = round_seq(sol_col_vals, ndigits)
stu_col_vals = round_seq(stu_col_vals, ndigits)
except:
pass
if sol_col_vals != stu_col_vals:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"col": sol_col_name, "ordered": ordered}
)
state.do_test(_msg)
return state | Verify if a student and solution query result match up.
This function must always be used after 'zooming' in on certain columns or records (check_column, check_row or check_result).
``has_equal_value`` then goes over all columns that are still left in the solution query result, and compares each column with the
corresponding column in the student query result.
Args:
ordered: if set to False, the default, all rows are sorted (according
to the first column and the following columns as tie breakers).
if set to True, the order of rows in student and solution query have to match.
digits: if specified, number of decimals to use when comparing column values.
incorrect_msg: if specified, this overrides the automatically generated feedback
message in case a column in the student query result does not match
a column in the solution query result.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists ORDER BY name``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# passes, as order is not important by default
Ex().check_column('name').has_equal_value()
# fails, as order is deemed important
Ex().check_column('name').has_equal_value(ordered=True)
# check_column fails, as id is not in the student query result
Ex().check_column('id').has_equal_value()
# check_all_columns fails, as id not in the student query result
Ex().check_all_columns().has_equal_value() |
1,571 | def string_to_tokentype(s):
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node | Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String |
1,572 | def get_nested_val(key_tuple, dict_obj):
if len(key_tuple) == 1:
return dict_obj[key_tuple[0]]
return get_nested_val(key_tuple[1:], dict_obj[key_tuple[0]]) | Return a value from nested dicts by the order of the given keys tuple.
Parameters
----------
key_tuple : tuple
The keys to use for extraction, in order.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example
-------
>>> dict_obj = {'a': {'b': 7}}
>>> get_nested_val(('a', 'b'), dict_obj)
7 |
1,573 | def validate(self, instance, value):
try:
floatval = float(value)
if not self.cast and abs(value - floatval) > TOL:
self.error(
instance=instance,
value=value,
extra=.format(TOL),
)
except (TypeError, ValueError):
self.error(instance, value, extra=)
_in_bounds(self, instance, floatval)
return floatval | Checks that value is a float and in min/max bounds
Non-float numbers are coerced to floats |
1,574 | def list_controller_revision_for_all_namespaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
return data | list_controller_revision_for_all_namespaces # noqa: E501
list or watch objects of kind ControllerRevision # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ControllerRevisionList
If the method is called asynchronously,
returns the request thread. |
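A brief usage sketch for the generated Kubernetes Python client. It is hedged: it assumes this method lives on AppsV1Api and that a local kubeconfig is available, neither of which is shown in the snippet above.
from kubernetes import client, config

config.load_kube_config()  # inside a cluster, use config.load_incluster_config() instead
apps_api = client.AppsV1Api()
# list ControllerRevisions across every namespace in one call
revisions = apps_api.list_controller_revision_for_all_namespaces(watch=False)
for item in revisions.items:
    print(item.metadata.namespace, item.metadata.name)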
1,575 | def hog(image, orientations=8, ksize=(5, 5)):
s0, s1 = image.shape[:2]
try:
k = hog.kernels[str(ksize) + str(orientations)]
except KeyError:
k = _mkConvKernel(ksize, orientations)
hog.kernels[str(ksize) + str(orientations)] = k
out = np.empty(shape=(s0, s1, orientations))
image[np.isnan(image)] = 0
for i in range(orientations):
out[:, :, i] = convolve(image, k[i])
return out | returns the Histogram of Oriented Gradients
:param ksize: convolution kernel size as (y,x) - needs to be odd
:param orientations: number of orientations in between rad=0 and rad=pi
similar to http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
but faster and with less options |
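A minimal usage sketch. It assumes hog is imported from its module (so the helpers _mkConvKernel and convolve it calls are available) and that hog.kernels has been initialised to an empty dict, which the caching lookup above requires.
import numpy as np

hog.kernels = {}  # kernel cache keyed by ksize and orientations, as used in the try/except above

image = np.random.rand(64, 64)            # any 2-D float image
response = hog(image, orientations=8, ksize=(5, 5))
print(response.shape)                     # (64, 64, 8): one response map per orientation
# note: the function zeroes NaN pixels in place, so pass a copy if the input must stay intact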
1,576 | def subprocess_check_output(*args, cwd=None, env=None, stderr=False):
if stderr:
proc = yield from asyncio.create_subprocess_exec(*args, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stderr.read()
else:
proc = yield from asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stdout.read()
if output is None:
return ""
return output.decode("utf-8", errors="ignore") | Run a command and capture output
:param *args: List of command arguments
:param cwd: Current working directory
:param env: Command environment
:param stderr: Read on stderr
:returns: Command output |
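A hedged driver for the coroutine above, written in the same legacy generator-based asyncio style; it assumes subprocess_check_output is decorated with @asyncio.coroutine in its original module, as the yield from usage implies.
import asyncio

@asyncio.coroutine
def main():
    output = yield from subprocess_check_output("echo", "hello")
    print(output.strip())

loop = asyncio.get_event_loop()
loop.run_until_complete(main())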
1,577 | def cleanup(self, force=False):
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service | Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service |
1,578 | def tocimxml(value):
if isinstance(value, (tuple, list)):
array_xml = []
for v in value:
if v is None:
if SEND_VALUE_NULL:
array_xml.append(cim_xml.VALUE_NULL())
else:
array_xml.append(cim_xml.VALUE(None))
else:
array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v)))
value_xml = cim_xml.VALUE_ARRAY(array_xml)
return value_xml
    if hasattr(value, 'tocimxml'):
return value.tocimxml()
if value is None:
warnings.warn("A value of None for pywbem.tocimxml() has been "
"deprecated.",
DeprecationWarning, stacklevel=2)
return cim_xml.VALUE(atomic_to_cim_xml(value)) | Return the CIM-XML representation of the input object,
as an object of an appropriate subclass of :term:`Element`.
The returned CIM-XML representation is consistent with :term:`DSP0201`.
Parameters:
value (:term:`CIM object`, :term:`CIM data type`, :term:`number`, :class:`py:datetime.datetime`, or tuple/list thereof):
The input object.
Specifying `None` has been deprecated in pywbem 0.12.
Returns:
The CIM-XML representation, as an object of an appropriate subclass of
:term:`Element`. |
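A short pywbem sketch, assuming a pywbem release where tocimxml and the CIM integer types are exported at package level and where the returned elements expose toxml(), as pywbem's cim_xml elements do.
import pywbem

scalar_xml = pywbem.tocimxml(pywbem.Uint32(42))                  # -> VALUE element
print(scalar_xml.toxml())

array_xml = pywbem.tocimxml([pywbem.Uint8(1), pywbem.Uint8(3)])  # -> VALUE.ARRAY element
print(array_xml.toxml())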
1,579 | def create_geometry(self, input_geometry, dip, upper_depth, lower_depth,
mesh_spacing=1.0):
assert((dip > 0.) and (dip <= 90.))
self.dip = dip
self._check_seismogenic_depths(upper_depth, lower_depth)
if not isinstance(input_geometry, Line):
if not isinstance(input_geometry, np.ndarray):
            raise ValueError('Unrecognised or unsupported geometry '
                             'definition')  # message text assumed; the original string literal was stripped
else:
self.fault_trace = Line([Point(row[0], row[1]) for row in
input_geometry])
else:
self.fault_trace = input_geometry
self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace,
self.upper_depth,
self.lower_depth,
self.dip,
                                                            mesh_spacing) | If the geometry is defined as a numpy array, create an instance of the
nhlib.geo.line.Line class; otherwise, if it is already an instance of that
class, accept it as-is.
:param input_geometry:
Trace (line) of the fault source as either
i) instance of nhlib.geo.line.Line class
ii) numpy.ndarray [Longitude, Latitude]
:param float dip:
Dip of fault surface (in degrees)
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
:param float mesh_spacing:
Spacing of the fault mesh (km) {default = 1.0} |
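A hedged construction sketch: source stands for an already-created instance of the fault-source class this method belongs to (the class itself is not shown in the snippet), and the trace is passed in the [longitude, latitude] array form the docstring allows.
import numpy as np

# two-point surface trace given as [longitude, latitude] rows
trace = np.array([[30.0, 30.0],
                  [30.5, 30.25]])
source.create_geometry(trace, dip=60.0, upper_depth=0.0, lower_depth=20.0, mesh_spacing=1.0)
print(source.geometry)  # a SimpleFaultSurface built from the trace and seismogenic depths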
1,580 | def create_widget(self):
context = self.get_context()
d = self.declaration
    style = d.style or '@attr/autoCompleteTextViewStyle'  # resource names on this line and the adapter line are assumed; the original string literals were stripped
    self.widget = AutoCompleteTextView(context, None, style)
    self.adapter = ArrayAdapter(context, '@layout/simple_list_item') | Create the underlying widget. |
1,581 | def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({"channel": channel, "name": name})
return self.api_call("channels.rename", json=kwargs) | Renames a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
name (str): The new channel name. e.g. 'newchannel' |
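A hedged call sketch with the Slack WebClient this method belongs to; the environment variable and channel id are placeholders, and the workspace (xoxp) token must carry the channels:write scope.
import os
import slack

client = slack.WebClient(token=os.environ["SLACK_API_TOKEN"])  # xoxp token, per the _validate_xoxp_token call above
response = client.channels_rename(channel="C1234567890", name="newchannel")
assert response["ok"]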
1,582 | def hierarchyLookup(self, record):
def _get_lookup(cls):
if cls in self._hierarchyLookup:
return self._hierarchyLookup[cls]
for base in cls.__bases__:
results = _get_lookup(base)
if results:
return results
return (None, None)
tableType, column = _get_lookup(type(record))
if tableType and column:
return (tableType, column)
default = self._hierarchyLookup.get(None)
if default:
return default
return (None, None) | Looks up additional hierarchy information for the inputed record.
:param record | <orb.Table>
:return (<subclass of orb.Table> || None, <str> column) |
1,583 | def _certificate_required(cls, hostname, port=XCLI_DEFAULT_PORT,
ca_certs=None, validate=None):
if not ca_certs:
return False
xlog.debug("CONNECT SSL %s:%s, cert_file=%s",
hostname, port, ca_certs)
certificate = ssl.get_server_certificate((hostname, port),
ca_certs=None)
if validate:
return not validate(certificate)
        return True | Returns True if the connection should verify the certificate. |
1,584 | def enqueue_conversion_path(url_string, to_type, enqueue_convert):
target_ts = TypeString(to_type)
foreign_res = ForeignResource(url_string)
typed_foreign_res = foreign_res.guess_typed()
if not typed_foreign_res.cache_exists():
typed_foreign_res.symlink_from(foreign_res)
original_ts = typed_foreign_res.typestring
path = singletons.converter_graph.find_path(original_ts, target_ts)
is_first = True
for converter_class, from_ts, to_ts in path:
converter = converter_class()
in_resource = TypedResource(url_string, from_ts)
if is_first:
in_resource = TypedForeignResource(url_string, from_ts)
out_resource = TypedResource(url_string, to_ts)
enqueue_convert(converter, in_resource, out_resource)
is_first = False | Given a URL string that has already been downloaded, enqueue
the conversions necessary to reach the target type |
1,585 | def _update_key(self, mask, key):
mask = np.asanyarray(mask)
if key in self._data:
self._data[key] = self._data[key][mask] | Mask the value contained in the DataStore at a specified key.
Parameters
-----------
mask: (n,) int
(n,) bool
key: hashable object, in self._data |
1,586 | def rows2skip(self, decdel):
    if decdel == '.':    # period decimal delimiter (literal assumed; stripped in the source)
        ms = self.matches_p
    elif decdel == ',':  # comma decimal delimiter (literal assumed; stripped in the source)
        ms = self.matches_c
    cnt = row = 0
    for val1, val2 in zip(ms, ms[1:]):
        row += 1
        # the loop body appears truncated in the source; a plausible reconstruction
        # of counting consecutive, equal, non-zero match counts:
        if val2 == val1 != 0:
            cnt += 1
            if cnt >= EQUAL_CNT_REQ:
                break
        else:
            cnt = 0
self.cnt = cnt
return row - EQUAL_CNT_REQ | Return the number of rows to skip based on the decimal delimiter
decdel.
When each record starts to have the same number of matches, that is
where the data starts. The number of consecutive records required to
have the same number of matches is EQUAL_CNT_REQ. |
1,587 | def json_item(model, target=None, theme=FromCurdoc):
with OutputDocumentFor([model], apply_theme=theme) as doc:
doc.title = ""
docs_json = standalone_docs_json([model])
doc = list(docs_json.values())[0]
    root_id = doc['roots']['root_ids'][0]
    return {
        'target_id': target,
        'root_id': root_id,
        'doc': doc,
} | Return a JSON block that can be used to embed standalone Bokeh content.
Args:
model (Model) :
The Bokeh object to embed
target (string, optional)
A div id to embed the model into. If None, the target id must
be supplied in the JavaScript call.
theme (Theme, optional) :
Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
JSON-like
This function returns a JSON block that can be consumed by the BokehJS
function ``Bokeh.embed.embed_item``. As an example, a Flask endpoint for
``/plot`` might return the following content to embed a Bokeh plot into
a div with id *"myplot"*:
.. code-block:: python
@app.route('/plot')
def plot():
p = make_plot('petal_width', 'petal_length')
return json.dumps(json_item(p, "myplot"))
Then a web page can retrieve this JSON and embed the plot by calling
``Bokeh.embed.embed_item``:
.. code-block:: html
<script>
fetch('/plot')
.then(function(response) { return response.json(); })
.then(function(item) { Bokeh.embed.embed_item(item); })
</script>
Alternatively, if it is more convenient to supply the target div id directly
in the page source, that is also possible. If `target_id` is omitted in the
call to this function:
.. code-block:: python
return json.dumps(json_item(p))
Then the value passed to ``embed_item`` is used:
.. code-block:: javascript
Bokeh.embed.embed_item(item, "myplot"); |
1,588 | def normalize(self, decl_string, arg_separator=None):
if not self.has_pattern(decl_string):
return decl_string
name, args = self.split(decl_string)
for i, arg in enumerate(args):
args[i] = self.normalize(arg)
return self.join(name, args, arg_separator) | implementation details |
1,589 | def execute(self):
from vsgen.util.logger import VSGLogger
VSGLogger.info(self._logname, self._message)
start = time.clock()
VSGWriter.write(self._writables, self._parallel)
end = time.clock()
VSGLogger.info(self._logname, "Wrote %s files in %s seconds:", len(self._writables), end - start) | Executes the command. |
1,590 | def hacking_docstring_start_space(physical_line, previous_logical, tokens):
docstring = is_docstring(tokens, previous_logical)
if docstring:
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
        if docstring[len(start_triple)] == ' ':
return (0, "H401: docstring should not start with"
" a space") | r"""Check for docstring not starting with space.
OpenStack HACKING guide recommendation for docstring:
Docstring should not start with space
Okay: def foo():\n '''This is good.'''
Okay: def foo():\n r'''This is good.'''
Okay: def foo():\n a = ''' This is not a docstring.'''
Okay: def foo():\n pass\n ''' This is not.'''
H401: def foo():\n ''' This is not.'''
H401: def foo():\n r''' This is not.''' |
1,591 | def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False):
nb_buckets = common_layers.shape_list(gates_q)[-1]
@expert_utils.add_name_scope()
def get_dispatcher(gates):
length = common_layers.shape_list(gates)[1]
nb_elems_to_dispatch = tf.reduce_sum(gates, axis=[1, 2])
nb_elems_to_dispatch = tf.reduce_max(nb_elems_to_dispatch)
nb_elems_to_dispatch = tf.to_int32(nb_elems_to_dispatch)
capacity = nb_elems_to_dispatch // nb_buckets * 2
capacity = tf.minimum(length, capacity)
tf.summary.scalar("dispatch_capacity", capacity, family="lsh")
return expert_utils.TruncatingDispatcher(gates, capacity)
def add_summary_capacity(x, prefix):
x = x[0, ...]
x = tf.reduce_sum(x, axis=0)
tf.summary.scalar(prefix + "_min", tf.reduce_min(x), family="lsh")
tf.summary.scalar(prefix + "_max", tf.reduce_max(x), family="lsh")
tf.summary.histogram(prefix + "capacity_distribution", x, family="lsh")
for i in range(3):
tf.summary.scalar("{}_{}".format(prefix, i), x[i], family="lsh")
add_summary_capacity(gates_q, "q")
add_summary_capacity(gates_k, "k")
q_dispatcher = get_dispatcher(gates_q)
k_dispatcher = get_dispatcher(gates_k)
q = q_dispatcher.dispatch(q)
k = k_dispatcher.dispatch(k)
v = k_dispatcher.dispatch(v)
bias = tf.expand_dims((k_dispatcher.nonpadding() - 1.0) * 1e9, 2)
if mask_right:
q_coordinate = tf.to_float(
tf.expand_dims(q_dispatcher.length_coordinate(), 3))
k_coordinate = tf.to_float(
tf.expand_dims(k_dispatcher.length_coordinate(), 2))
bias += tf.to_float(tf.greater(k_coordinate, q_coordinate)) * -1e9
v_out = dot_product_attention(q, k, v, bias=bias)
return q_dispatcher.combine(v_out) | Perform a dot product attention on a single sequence on a single head.
This function dispatch the q, k, v and loop over the buckets to compute the
attention dot product on each subsequences.
Args:
q (tf.Tensor): [batch*heads, length_q, depth_q]
k (tf.Tensor): [batch*heads, length_k, depth_q]
v (tf.Tensor): [batch*heads, length_k, depth_v]
gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets]
gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets]
mask_right (bool): Add a bias to prevent attention to the future
Returns:
tf.Tensor: [length_q, depth_v] |
1,592 | def visit_FormattedValue(self, node: AST,
dfltChaining: bool = True) -> str:
format_spec = node.format_spec
return f"{{{self.visit(node.value)}" \
f"{self.CONV_MAP.get(node.conversion, )}" \
f"{+self._nested_str(format_spec) if format_spec else }}}" | Return `node`s value formatted according to its format spec. |
1,593 | def _recurse(self, matrix, m_list, indices, output_m_list=[]):
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list)
return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list) | This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them. |
1,594 | def setImagePlotAutoRangeOn(self, axisNumber):
setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.yAxisRangeCti, axisNumber) | Sets the image plot's auto-range on for the axis with number axisNumber.
:param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes). |
1,595 | def isPositiveStrand(self):
if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND:
return True
return self.strand == self.POSITIVE_STRAND | Check if this genomic region is on the positive strand.
:return: True if this element is on the positive strand |
1,596 | def to_text_diagram_drawer(
self,
*,
use_unicode_characters: bool = True,
qubit_namer: Optional[Callable[[ops.Qid], str]] = None,
transpose: bool = False,
precision: Optional[int] = 3,
qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,
get_circuit_diagram_info:
Optional[Callable[[ops.Operation,
protocols.CircuitDiagramInfoArgs],
protocols.CircuitDiagramInfo]]=None
) -> TextDiagramDrawer:
qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
self.all_qubits())
qubit_map = {qubits[i]: i for i in range(len(qubits))}
if qubit_namer is None:
        qubit_namer = lambda q: str(q) + ('' if transpose else ': ')
diagram = TextDiagramDrawer()
for q, i in qubit_map.items():
diagram.write(0, i, qubit_namer(q))
moment_groups = []
for moment in self._moments:
_draw_moment_in_diagram(moment,
use_unicode_characters,
qubit_map,
diagram,
precision,
moment_groups,
get_circuit_diagram_info)
w = diagram.width()
for i in qubit_map.values():
diagram.horizontal_line(i, 0, w)
if moment_groups:
_draw_moment_groups_in_diagram(moment_groups,
use_unicode_characters,
diagram)
if transpose:
diagram = diagram.transpose()
return diagram | Returns a TextDiagramDrawer with the circuit drawn into it.
Args:
use_unicode_characters: Determines if unicode characters are
allowed (as opposed to ascii-only diagrams).
qubit_namer: Names qubits in diagram. Defaults to str.
transpose: Arranges qubit wires vertically instead of horizontally.
precision: Number of digits to use when representing numbers.
qubit_order: Determines how qubits are ordered in the diagram.
get_circuit_diagram_info: Gets circuit diagram info. Defaults to
protocol with fallback.
Returns:
The TextDiagramDrawer instance. |
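A small rendering sketch with a recent Cirq release; the list-based Circuit constructor and the drawer's render() method are assumed from current Cirq and are not shown in the snippet above.
import cirq

a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit([cirq.H(a), cirq.CNOT(a, b)])
drawer = circuit.to_text_diagram_drawer(use_unicode_characters=True)
print(drawer.render())  # renders the wire diagram as text, one horizontal line per qubit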
1,597 | def _generate_corpus_table(self, labels, ngrams):
html = []
for label in labels:
html.append(self._render_corpus_row(label, ngrams))
    return ''.join(html)  # separator assumed; the original string literal was stripped | Returns an HTML table containing data on each corpus' n-grams. |
1,598 | def _prefix_from_ip_int(self, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
self._max_prefixlen)
prefixlen = self._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = self._max_prefixlen // 8
        details = _int_to_bytes(ip_int, byteslen, 'big')
        msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen | Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones |
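A standalone sketch of the same bit logic for IPv4 (maximum prefix length 32), independent of the class above, to make the trailing-zero / all-ones check concrete.
def prefix_from_netmask_int(ip_int, max_prefixlen=32):
    # count trailing zero bits, then verify the remaining high bits are all ones
    trailing_zeroes = 0
    while trailing_zeroes < max_prefixlen and not (ip_int >> trailing_zeroes) & 1:
        trailing_zeroes += 1
    prefixlen = max_prefixlen - trailing_zeroes
    if (ip_int >> trailing_zeroes) != (1 << prefixlen) - 1:
        raise ValueError("netmask intermingles zeroes and ones")
    return prefixlen

print(prefix_from_netmask_int(0xFFFFFF00))  # 255.255.255.0   -> 24
print(prefix_from_netmask_int(0xFFFFFFFF))  # 255.255.255.255 -> 32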
1,599 | def pattern_for_view(self, view, action):
    if getattr(view, 'derive_url_pattern', None):
return view.derive_url_pattern(self.path, action)
else:
        return r'^%s/%s/$' % (self.path, action)  # pattern literal assumed; the original string literal was stripped | Returns the URL pattern for the passed in action. |