Unnamed: 0 (int64) | code (string) | docstring (string)
---|---|---|
27,100 |
def index(self, strictindex):
return self._select(self._pointer.index(self.ruamelindex(strictindex)))
|
Return a chunk in a sequence referenced by index.
|
27,101 |
def put_key(key_name,
value,
description,
meta,
modify,
add,
lock,
key_type,
stash,
passphrase,
backend):
stash = _get_stash(backend, stash, passphrase)
try:
click.echo(.format(key_type))
stash.put(
name=key_name,
value=_build_dict_from_key_value(value),
modify=modify,
metadata=_build_dict_from_key_value(meta),
description=description,
lock=lock,
key_type=key_type,
add=add)
click.echo()
except GhostError as ex:
sys.exit(ex)
|
Insert a key into the stash.
`KEY_NAME` is the name of the key to insert.
`VALUE` is a key=value argument which can be provided multiple times;
it is the encrypted value of your key.
|
27,102 |
def _construct_w(self, inputs):
weight_shape = self._kernel_shape + (1, 1)
if "w" not in self._initializers:
self._initializers["w"] = create_weight_initializer(weight_shape[:2],
dtype=inputs.dtype)
w = tf.get_variable("w",
shape=weight_shape,
dtype=inputs.dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return w
|
Construct the convolution weight matrix.
Figures out the shape of the weight matrix, initializes it, and returns it.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
Returns:
w: A weight matrix of the same type as `inputs` and of shape
[kernel_shape, 1, 1].
|
27,103 |
def getSubstituteType(self, elt, ps):
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
No Type registed for xsi:type=(%s, %s)No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)Substitute Type (%s, %s) is not derived from %s' %
(self.type[0], self.type[1], pyclass), ps.Backtrace(elt))
return subclass((self.nspname, self.pname))
|
if xsi:type does not match the instance type attr,
check to see if it is a derived type substitution.
DONT Return the element's type.
Parameters:
elt -- the DOM element being parsed
ps -- the ParsedSoap object.
|
27,104 |
def save_figure_tofile(fig, fmt, fname):
    root, ext = osp.splitext(fname)
    if ext == '.png' and fmt == 'image/svg+xml':
        qimg = svg_to_image(fig)
        qimg.save(fname)
    else:
        if fmt == 'image/svg+xml' and is_unicode(fig):
            fig = fig.encode('utf-8')
        with open(fname, 'wb') as f:
            f.write(fig)
|
Save fig to fname in the format specified by fmt.
|
27,105 |
def add_categories(self, new_categories, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
|
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
|
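For context, a minimal usage sketch of the add_categories API documented above (assuming a recent pandas version; the values are illustrative):

import pandas as pd
cat = pd.Categorical(["a", "b", "a"])
cat = cat.add_categories(["c", "d"])   # returns a copy with the new categories appended
print(cat.categories)                  # Index(['a', 'b', 'c', 'd'], dtype='object')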
27,106 |
def plot_spectrum(self, convention=, unit=, base=10.,
lmax=None, xscale=, yscale=, grid=True,
legend=None, axes_labelsize=None, tick_labelsize=None,
show=True, ax=None, fname=None, **kwargs):
if lmax is None:
lmax = self.lmax
spectrum = self.spectrum(convention=convention, unit=unit, base=base,
lmax=lmax)
ls = _np.arange(lmax + 1)
if ax is None:
fig, axes = _plt.subplots(1, 1)
else:
axes = ax
if axes_labelsize is None:
axes_labelsize = _mpl.rcParams[]
if tick_labelsize is None:
tick_labelsize = _mpl.rcParams[]
axes.set_xlabel(, fontsize=axes_labelsize)
if convention == :
axes.set_ylabel(, fontsize=axes_labelsize)
if legend is None:
if (unit == ):
legend =
elif (unit == ):
legend =
elif (unit == ):
legend =
elif convention == :
axes.set_ylabel(, fontsize=axes_labelsize)
if legend is None:
if (unit == ):
legend =
elif (unit == ):
legend =
elif (unit == ):
legend =
else:
axes.set_ylabel(, fontsize=axes_labelsize)
if legend is None:
if (unit == ):
legend =
elif (unit == ):
legend =
elif (unit == ):
legend =
if xscale == :
axes.set_xscale(, basex=base)
if yscale == :
axes.set_yscale(, basey=base)
if xscale == :
axes.plot(ls[1:lmax+1], spectrum[1:lmax+1], label=legend, **kwargs)
else:
axes.plot(ls[:lmax+1], spectrum[:lmax+1], label=legend, **kwargs)
axes.set(xlim=(ls[0], ls[lmax]))
axes.grid(grid, which=)
axes.minorticks_on()
axes.tick_params(labelsize=tick_labelsize)
axes.legend()
if ax is None:
fig.tight_layout(pad=0.5)
if show:
fig.show()
if fname is not None:
fig.savefig(fname)
return fig, axes
|
Plot the spectrum as a function of spherical harmonic degree.
Usage
-----
x.plot_spectrum([convention, unit, base, lmax, xscale, yscale, grid,
axes_labelsize, tick_labelsize, legend, show, ax,
fname, **kwargs])
Parameters
----------
convention : str, optional, default = 'power'
The type of spectrum to plot: 'power' for power spectrum,
'energy' for energy spectrum, and 'l2norm' for the l2 norm
spectrum.
unit : str, optional, default = 'per_l'
If 'per_l', plot the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', plot the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', plot the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum, and
the base to use for logarithmic axes.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
xscale : str, optional, default = 'lin'
Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
yscale : str, optional, default = 'log'
Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
grid : bool, optional, default = True
If True, plot grid lines.
legend : str, optional, default = None
Text to use for the legend.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
show : bool, optional, default = True
If True, plot to the screen.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
fname : str, optional, default = None
If present, and if axes is not specified, save the image to the
specified file.
**kwargs : keyword arguments, optional
Keyword arguments for pyplot.plot().
Description
-----------
This method plots either the power spectrum, energy spectrum, or
l2-norm spectrum. Total power is defined as the integral of the
function squared over all space, divided by the area the function
spans. If the mean of the function is zero, this is equivalent to the
variance of the function. The total energy is the integral of the
function squared over all space and is 4pi times the total power. For
normalized coefficients ('4pi', 'ortho', or 'schmidt'), the l2-norm is
the sum of the magnitude of the coefficients squared.
The output spectrum can be expressed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitesimal logarithmic degree band. The contribution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl') is equal to
spectrum(l, 'per_l')*l*log(a).
|
27,107 |
def bandstats(filenames=None, num_sample_points=3, temperature=None,
degeneracy_tol=1e-4, parabolic=True):
if not filenames:
filenames = find_vasprun_files()
elif isinstance(filenames, str):
filenames = [filenames]
bandstructures = []
for vr_file in filenames:
vr = BSVasprun(vr_file, parse_projected_eigen=False)
bs = vr.get_band_structure(line_mode=True)
bandstructures.append(bs)
bs = get_reconstructed_band_structure(bandstructures)
if bs.is_metal():
logging.error()
sys.exit()
_log_band_gap_information(bs)
vbm_data = bs.get_vbm()
cbm_data = bs.get_cbm()
logging.info()
_log_band_edge_information(bs, vbm_data)
logging.info()
_log_band_edge_information(bs, cbm_data)
if parabolic:
logging.info()
else:
logging.info()
if temperature:
logging.error()
else:
hole_extrema = []
for spin, bands in vbm_data[].items():
hole_extrema.extend([(spin, band, kpoint) for band in bands
for kpoint in vbm_data[]])
elec_extrema = []
for spin, bands in cbm_data[].items():
elec_extrema.extend([(spin, band, kpoint) for band in bands
for kpoint in cbm_data[]])
hole_data = []
for extrema in hole_extrema:
hole_data.extend(get_fitting_data(bs, *extrema,
num_sample_points=num_sample_points))
elec_data = []
for extrema in elec_extrema:
elec_data.extend(get_fitting_data(bs, *extrema,
num_sample_points=num_sample_points))
logging.info()
for data in hole_data:
eff_mass = fit_effective_mass(data[], data[],
parabolic=parabolic)
data[] = eff_mass
_log_effective_mass_data(data, bs.is_spin_polarized, mass_type=)
logging.info()
for data in elec_data:
eff_mass = fit_effective_mass(data[], data[],
parabolic=parabolic)
data[] = eff_mass
_log_effective_mass_data(data, bs.is_spin_polarized)
return {: hole_data, : elec_data}
|
Calculate the effective masses of the bands of a semiconductor.
Args:
filenames (:obj:`str` or :obj:`list`, optional): Path to vasprun.xml
or vasprun.xml.gz file. If no filenames are provided, the code
will search for vasprun.xml or vasprun.xml.gz files in folders
named 'split-0*'. Failing that, the code will look for a vasprun in
the current directory. If a :obj:`list` of vasprun files is
provided, these will be combined into a single band structure.
num_sample_points (:obj:`int`, optional): Number of k-points to sample
when fitting the effective masses.
temperature (:obj:`int`, optional): Find band edges within kB * T of
the valence band maximum and conduction band minimum. Not currently
implemented.
degeneracy_tol (:obj:`float`, optional): Tolerance for determining the
degeneracy of the valence band maximum and conduction band minimum.
parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
edges. If ``False`` then nonparabolic fitting will be attempted.
Defaults to ``True``.
Returns:
dict: The hole and electron effective masses. Formatted as a
:obj:`dict` with keys: ``'hole_data'`` and ``'electron_data'``. The
data is a :obj:`list` of :obj:`dict` with the keys:
'effective_mass' (:obj:`float`)
The effective mass in units of electron rest mass, :math:`m_0`.
'energies' (:obj:`numpy.ndarray`)
Band eigenvalues in eV.
'distances' (:obj:`numpy.ndarray`)
Distances of the k-points in reciprocal space.
'band_id' (:obj:`int`)
The index of the band,
'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`)
The spin channel
'start_kpoint' (:obj:`int`)
The index of the k-point at which the band extrema occurs
'end_kpoint' (:obj:`int`)
|
27,108 |
def _set_guard(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=guard.guard, is_container=, presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u"Change an interfacedisplay-when((/protocol/spanning-tree/stp) or(/protocol/spanning-tree/rstp))cli-incomplete-nourn:brocade.com:mgmt:brocade-xstpbrocade-xstpcontainererror-stringdefined-typegenerated-type_set'):
self._set()
|
Setter method for guard, mapped from YANG variable /interface/port_channel/spanning_tree/guard (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_guard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_guard() directly.
|
27,109 |
def _make_unique_slug(slug: str, language: str, is_unique: Callable[[str], bool]) -> str:
index = 1
unique_slug = slug
while not is_unique(unique_slug, language):
unique_slug = % (slug, index)
index += 1
return unique_slug
|
Guarantees that the specified slug is unique by appending
a number until it is unique.
Arguments:
slug:
The slug to make unique.
is_unique:
Function that can be called to verify
whether the generated slug is unique.
Returns:
A guaranteed unique slug.
|
27,110 |
def accuracy(self):
    if self.population() == 0:
        return float('NaN')
    return (self._tp + self._tn) / self.population()
|
r"""Return accuracy.
Accuracy is defined as :math:`\frac{tp + tn}{population}`
Cf. https://en.wikipedia.org/wiki/Accuracy
Returns
-------
float
The accuracy of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.accuracy()
0.782608695652174
|
27,111 |
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op
|
Return a training op minimizing loss.
|
27,112 |
def _validate_j2k_colorspace(self, cparams, colorspace):
    if cparams.codec_fmt == opj2.CODEC_J2K and colorspace is not None:
        msg = 'Cannot specify a colorspace with J2K.'
        raise IOError(msg)
|
Cannot specify a colorspace with J2K.
|
27,113 |
def __parse_affiliations_json(self, affiliations, uuid):
enrollments = []
for affiliation in affiliations.values():
name = self.__encode(affiliation[])
try:
start_date = str_to_datetime(affiliation[])
end_date = str_to_datetime(affiliation[])
except InvalidDateError as e:
raise InvalidFormatError(cause=str(e))
if not start_date and not end_date:
continue
if not start_date:
start_date = MIN_PERIOD_DATE
if not end_date:
end_date = MAX_PERIOD_DATE
org = self._organizations.get(name, None)
if org:
start_date = org.active if start_date < org.active else start_date
end_date = org.inactive if end_date > org.inactive else end_date
if not org:
org = Organization(name=name)
org.active = MIN_PERIOD_DATE
org.inactive = MAX_PERIOD_DATE
enrollment = Enrollment(start=start_date, end=end_date,
organization=org)
enrollments.append(enrollment)
return enrollments
|
Parse identity's affiliations from a json dict
|
27,114 |
def get(self):
if len(self.call_queue):
return self.apply(lambda x: x).get()
try:
return ray.get(self.oid)
except RayTaskError as e:
handle_ray_task_error(e)
|
Gets the object out of the plasma store.
Returns:
The object from the plasma store.
|
27,115 |
def snooze(self, from_email, duration):
    if from_email is None or not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    endpoint = '/'.join((self.endpoint, self.id, 'snooze'))
    add_headers = {'from': from_email, }
    return self.__class__.create(
        endpoint=endpoint,
        api_key=self.api_key,
        add_headers=add_headers,
        data_key='duration',
        data=duration,
    )
|
Snooze this incident for `duration` seconds.
|
27,116 |
def options(self, *args, **kwargs):
    self.enable_cache()
    self.handle_session_cookie()
    self.preflight()
    if self.verify_origin():
        allowed_methods = getattr(self, 'access_methods', 'OPTIONS, POST')
        self.set_header('Access-Control-Allow-Methods', allowed_methods)
        self.set_header('Allow', allowed_methods)
        self.set_status(204)
    else:
        self.set_status(403)
    self.finish()
|
XHR cross-domain OPTIONS handler
|
27,117 |
def get_global_gradient_norm(self) -> float:
exec_norms = [global_norm([arr for arr in exe.grad_arrays if arr is not None]) for exe in self.executors]
norm_val = sum(exec_norms) / float(len(exec_norms))
norm_val *= self.optimizer.rescale_grad
return norm_val
|
Returns global gradient norm.
|
27,118 |
def extract_mime(self, mime, def_mime=):
self[] = def_mime
if mime:
self[] = self.MIME_RE.split(mime, 1)[0]
self[] = mime
|
Utility function to extract mimetype only
from a full content type, removing charset settings
|
27,119 |
def control_group(self, control_group_id, ctrl, shift, alt):
action = sc_pb.Action()
select = action.action_ui.control_group
mod = sc_ui.ActionControlGroup
if not ctrl and not shift and not alt:
select.action = mod.Recall
elif ctrl and not shift and not alt:
select.action = mod.Set
elif not ctrl and shift and not alt:
select.action = mod.Append
elif not ctrl and not shift and alt:
select.action = mod.SetAndSteal
elif not ctrl and shift and alt:
select.action = mod.AppendAndSteal
else:
return
select.control_group_index = control_group_id
return action
|
Act on a control group, selecting, setting, etc.
|
27,120 |
def inv(self):
result = Rotation(self.r.transpose())
result._cache_inv = self
return result
|
The inverse rotation
|
27,121 |
def _deliverAnswer(self, answer):
router = self.siteRouter
if answer.deliveryDeferred is None:
d = answer.deliveryDeferred = router.routeAnswer(
answer.originalSender, answer.originalTarget, answer.value,
answer.messageID)
def destroyAnswer(result):
answer.deleteFromStore()
def transportErrorCheck(f):
answer.deliveryDeferred = None
f.trap(MessageTransportError)
d.addCallbacks(destroyAnswer, transportErrorCheck)
d.addErrback(log.err)
|
Attempt to deliver an answer to a message sent to this store, via my
store's parent's L{IMessageRouter} powerup.
@param answer: an L{AlreadyAnswered} that contains an answer to a
message sent to this store.
|
27,122 |
def serialize(transform, **kwargs):
    params = transform.get_params()
    return jsonpickle.encode(params, **kwargs)
|
Serialize a transformation object or pipeline.
Parameters
----------
transform : BaseTransform or Pipeline
The transformation object to be serialized
kwargs
Additional keyword arguments to `jsonpickle.encode()`
Returns
-------
json_str : str
A JSON encoding of the transformation
See Also
--------
deserialize
Examples
--------
>>> D = muda.deformers.TimeStretch(rate=1.5)
>>> muda.serialize(D)
'{"params": {"rate": 1.5},
"__class__": {"py/type": "muda.deformers.time.TimeStretch"}}'
|
27,123 |
async def start(request):
global session
try:
body = await request.json()
except json.decoder.JSONDecodeError:
log.debug("No body in {}".format(request))
body = {}
if not session or body.get():
hardware = hw_from_req(request)
if body.get() and session:
await release(data={})
session = SessionManager(hardware)
res = init_pipette()
if res:
status = 201
data = {: session.id, : res}
else:
session = None
status = 403
data = {: }
else:
data = {:
}
status = 409
return web.json_response(data, status=status)
|
Begins the session manager for factory calibration, if a session is not
already in progress, or if the "force" key is specified in the request. To
force, use the following body:
{
"force": true
}
:return: The current session ID token or an error message
|
27,124 |
def set_plot_tcrhoc(self,linestyle=[],burn_limit=0.997,marker=[],markevery=500,end_model=[-1],deg_line=True):
if len(linestyle)==0:
linestyle=200*[]
for i in range(len(self.runs_H5_surf)):
m1p65_last=se(self.runs_H5_out[i])
t1_model=-1
if end_model[i] != -1:
t1_model=end_model[i]
model=m1p65_last.se.cycles
model_list=[]
for k in range(0,len(model),5):
model_list.append(model[k])
rho1=m1p65_last.get(model_list,)
T1=m1p65_last.get(model_list,)
T_unit=m1p65_last.get()
mass=m1p65_last.get(model_list,)
mini=m1p65_last.get()
zini=m1p65_last.get()
H-1He-4He-3C-12Ne-20O-16Si-28
rho=[]
T=[]
for k in range(len(model_list)):
rho_center=rho1[k][0]
T_center=T1[k][0]
rho.append(rho_center)
T.append(T_center)
T=np.log10(np.array(T)*T_unit)
rho=np.log10(np.array(rho))
figure(1)
label=str(mini)++str(zini)
plt.plot(rho,T,label=label,linestyle=linestyle[i],marker=marker[i],markevery=markevery)
b-$P_e = P_{e,deg}$changed cycles:H_startobHe_startorC_startogNe_startDbO_startDrSi_startDg
plt.rcParams.update({: 16})
plt.rc(, labelsize=16)
plt.rc(, labelsize=16)
legend(loc=4)
plt.xlabel(,fontsize=18)
plt.ylabel(,fontsize=18)
b-$P_e = P_{e,deg}$oborogDbDrDgfont.sizextickyticklog $\\rho_{\\rm c}$log $T_{\\rm c}$
i+=1
if deg_line==True:
rho=np.arange(0,9,0.01)
T_degeneracy=2./3. *rho +np.log10(1.207e5 * 1.8/(2.**(5./3.)))
plt.plot(rho,T_degeneracy,color=,linestyle=,label=)
plt.legend(loc=2)
|
Plots HRDs
end_model - array, controls how far in models a run is plotted; if -1, till the end
symbs_1 - set symbols of runs
|
27,125 |
def parse_duration_with_start(start, duration):
    elements = _parse_duration_string(_clean(duration))
    year, month = _year_month_delta_from_elements(elements)
    end = start.replace(
        year=start.year + year,
        month=start.month + month
    )
    del elements['year']
    del elements['month']
    end += _timedelta_from_elements(elements)
    return start, end - start
|
Attempt to parse an ISO8601 formatted duration based on a start datetime.
Accepts a ``duration`` and a start ``datetime``. ``duration`` must be
an ISO8601 formatted string.
Returns a ``datetime.timedelta`` object.
|
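As a worked illustration of the behaviour described above (a sketch grounded in the function body; the helpers it calls are private to the module):

from datetime import datetime
start = datetime(2020, 1, 15)
# parse_duration_with_start(start, "P1Y2M3D") first advances start by 1 year
# and 2 months via start.replace(...), then adds the remaining components as a
# timedelta (3 days here), and returns (start, end - start).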
27,126 |
def collapse_witnesses(self):
if self._matches.empty:
self._matches.rename(columns={constants.SIGLUM_FIELDNAME:
constants.SIGLA_FIELDNAME},
inplace=True)
return
self._matches.loc[:, constants.SIGLA_FIELDNAME] = \
self._matches[constants.SIGLUM_FIELDNAME]
grouped = self._matches.groupby(
[constants.WORK_FIELDNAME, constants.NGRAM_FIELDNAME,
constants.COUNT_FIELDNAME], sort=False)
def merge_sigla(df):
merged = df[0:1]
sigla = list(df[constants.SIGLA_FIELDNAME])
sigla.sort()
merged[constants.SIGLUM_FIELDNAME] = ', '.join(sigla)
return merged
self._matches = grouped.apply(merge_sigla)
del self._matches[constants.SIGLA_FIELDNAME]
self._matches.rename(columns={constants.SIGLUM_FIELDNAME:
constants.SIGLA_FIELDNAME},
inplace=True)
|
Groups together witnesses for the same n-gram and work that has the
same count, and outputs a single row for each group.
This output replaces the siglum field with a sigla field that
provides a comma separated list of the witness sigla. Due to
this, it is not necessarily possible to run other Results
methods on results that have had their witnesses collapsed.
|
27,127 |
def fetch_suvi_l1b(self, product, correct=True, median_kernel=5):
if self.date < datetime(2018, 5, 23) and not (self.date >= datetime(2017, 9, 6) \
and self.date <= datetime(2017, 9, 10, 23, 59)):
print("SUVI data is only available after 2018-5-23")
return product, None, None
url = self.suvi_base_url + product + "/{}/{:02d}/{:02d}".format(self.date.year, self.date.month, self.date.day)
if self.verbose:
print("Requesting from {}".format(url))
try:
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
page = response.read()
except (URLError, HTTPError):
msg = "The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser."
raise RuntimeError(msg.format(url))
soup = BeautifulSoup(page, )
links = [link[] for link in soup.find_all(, href=True)]
links = [link for link in links if "SUVI" in link]
meta = [self.parse_filename_meta(fn) for fn in links if ".fits" in fn]
links = sorted(meta, key=lambda m: np.abs((m[2] - self.date).total_seconds()))[:10]
links = [fn for fn, _, _, _, _ in links]
i = 0
def download_and_check(i):
try:
urllib.request.urlretrieve(url + "/" + links[i], "{}.fits".format(product))
except (URLError, HTTPError):
msg = "THE SUVI file you requested, {}, appears to be unvailable. Check if the website is correct."
raise RuntimeError(msg.format(url + "/" + links[i]))
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
return head[] > 0.5
while not download_and_check(i):
i += 1
with fits.open("{}.fits".format(product)) as hdu:
head = hdu[0].header
data = hdu[0].data
os.remove("{}.fits".format(product))
if correct:
data[np.isnan(data)] = 0
data[data < 0] = 0
if median_kernel:
data = medfilt(data, median_kernel)
data, head = self.align_solar_fov(head, data, 2.5, 2.0, rotate=True, scale=False)
if self.verbose:
print(product, " is using ", head[])
return product, head, data
|
Given a product keyword, downloads the SUVI l1b image into the current directory.
NOTE: the suvi_l1b_url must be properly set for the Fetcher object
:param product: the keyword for the product, e.g. suvi-l1b-fe094
:param correct: remove nans and negatives
:return: tuple of product name, fits header, and data object
the header and data object will be None if the request failed
|
27,128 |
def stream(self, muted=values.unset, hold=values.unset, coaching=values.unset,
           limit=None, page_size=None):
    limits = self._version.read_limits(limit, page_size)
    page = self.page(muted=muted, hold=hold, coaching=coaching, page_size=limits['page_size'], )
    return self._version.stream(page, limits['limit'], limits['page_limit'])
|
Streams ParticipantInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param bool muted: Whether to return only participants that are muted
:param bool hold: Whether to return only participants that are on hold
:param bool coaching: Whether to return only participants who are coaching another call
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.participant.ParticipantInstance]
|
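A hedged usage sketch of this endpoint, assuming the standard twilio-python client and placeholder identifiers:

from twilio.rest import Client
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")   # placeholder credentials
# Lazily iterate over muted participants of a conference, fetching at most 20 records.
for participant in client.conferences("CFXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").participants.stream(muted=True, limit=20):
    print(participant.call_sid)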
27,129 |
def retrieve(self, id) :
_, _, source = self.http_client.get("/sources/{id}".format(id=id))
return source
|
Retrieve a single source
Returns a single source available to the user by the provided id
If a source with the supplied unique identifier does not exist, it returns an error
:calls: ``get /sources/{id}``
:param int id: Unique identifier of a Source.
:return: Dictionary that supports attribute-style access and represents a Source resource.
:rtype: dict
|
27,130 |
def load_mo(self, state, page_idx):
try:
key = next(self._storage.irange(maximum=page_idx, reverse=True))
except StopIteration:
return None
else:
return self._storage[key]
|
Loads a memory object from memory.
:param page_idx: the index into the page
:returns: a tuple of the object
|
27,131 |
def _run_xmlsec(self, com_list, extra_args):
    with NamedTemporaryFile(suffix='.xml', delete=self._xmlsec_delete_tmpfiles) as ntf:
        com_list.extend(['--output', ntf.name])
        com_list += extra_args
        logger.debug('xmlsec command: %s', ' '.join(com_list))
pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
p_out, p_err = pof.communicate()
p_out = p_out.decode()
p_err = p_err.decode()
if pof.returncode != 0:
errmsg = "returncode={code}\nerror={err}\noutput={out}".format(
code=pof.returncode, err=p_err, out=p_out
)
logger.error(errmsg)
raise XmlsecError(errmsg)
ntf.seek(0)
return p_out, p_err, ntf.read()
|
Common code to invoke xmlsec and parse the output.
:param com_list: Key-value parameter list for xmlsec
:param extra_args: Positional parameters to be appended after all
key-value parameters
:result: Whatever xmlsec wrote to an --output temporary file
|
27,132 |
def showinfo(self):
print ,self.money
print ,self.store
print ,self.avgprice
|
Display an overview.
|
27,133 |
def reading(self):
sys.stdout.write("{0}Reading package lists...{1} ".format(
self.meta.color["GREY"], self.meta.color["ENDC"]))
sys.stdout.flush()
|
Message reading
|
27,134 |
def remove_kv_store(self, key):
data = {
: ,
: key
}
return self.post(self.make_url("/useragent-kv"), data=to_json(data),
headers=self.default_headers).text
|
Remove a key-value store entry.
:param key: string
|
27,135 |
def validateInterfaceName(n):
    try:
        if '.' not in n:
            raise Exception()
        if '..' in n:
            raise Exception()
        if len(n) > 255:
            raise Exception()
        if n[0] == '.':
            raise Exception()
        if n[0].isdigit():
            raise Exception()
        if if_re.search(n):
            raise Exception(
            )
        if dot_digit_re.search(n):
            raise Exception(
            )
    except Exception as e:
        raise MarshallingError('%s: %s' % (n, str(e)))
|
Verifies that the supplied name is a valid DBus Interface name. Throws
an L{error.MarshallingError} if the format is invalid
@type n: C{string}
@param n: A DBus interface name
|
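A few illustrative inputs, grounded in the checks performed above (the exact exception messages were not preserved in this copy of the code):

validateInterfaceName("org.freedesktop.DBus")   # passes: contains ".", no leading dot or digit
# validateInterfaceName("NoDotsHere")           # raises MarshallingError: no "."
# validateInterfaceName(".leading.dot")         # raises MarshallingError: starts with "."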
27,136 |
def dollars_to_math(source):
s = "\n".join(source)
if s.find("$") == -1:
return
global _data
_data = {}
def repl(matchobj):
global _data
s = matchobj.group(0)
t = "___XXX_REPL_%d___" % len(_data)
_data[t] = s
return t
s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s)
dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$")
slashdollar = re.compile(r"\\\$")
s = dollars.sub(r":math:`\1`", s)
s = slashdollar.sub(r"$", s)
for r in _data:
s = s.replace(r, _data[r])
source[:] = [s]
|
r"""
Replace dollar signs with backticks.
More precisely, do a regular expression search. Replace a plain
dollar sign ($) by a backtick (`). Replace an escaped dollar sign
(\$) by a dollar sign ($). Don't change a dollar sign preceded or
followed by a backtick (`$ or $`), because of strings like
"``$HOME``". Don't make any changes on lines starting with
spaces, because those are indented and hence part of a block of
code or examples.
This also doesn't replaces dollar signs enclosed in curly braces,
to avoid nested math environments, such as ::
$f(n) = 0 \text{ if $n$ is prime}$
Thus the above line would get changed to
`f(n) = 0 \text{ if $n$ is prime}`
|
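A small sketch of the in-place transformation this helper performs (the input line is illustrative):

source = ["Euler's identity: $e^{i\\pi} + 1 = 0$."]
dollars_to_math(source)
# source[0] is now "Euler's identity: :math:`e^{i\\pi} + 1 = 0`."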
27,137 |
def geoplot(df,
filter=None, n=0, p=0, sort=None,
x=None, y=None, figsize=(25, 10), inline=False,
by=None, cmap=, **kwargs):
import geoplot as gplt
import geopandas as gpd
from shapely.geometry import Point
df = nullity_filter(df, filter=filter, n=n, p=p)
df = nullity_sort(df, sort=sort)
nullity = df.notnull().sum(axis=) / df.shape[1]
if x and y:
gdf = gpd.GeoDataFrame(nullity, columns=[],
geometry=df.apply(lambda srs: Point(srs[x], srs[y]), axis=))
else:
raise ValueError("The and parameters must be specified.")
if by:
if df[by].isnull().any():
warnings.warn(.format(by))
df = df.dropna(subset=[by])
gdf = gdf.loc[df.index]
vc = df[by].value_counts()
if (vc < 3).any():
warnings.warn(
.format(by))
where = df[by].isin((df[by].value_counts() > 2).where(lambda b: b).dropna().index.values)
gdf = gdf.loc[where]
gdf[by] = df[by]
gplt.aggplot(gdf, figsize=figsize, hue=, agg=np.average, cmap=cmap, by=by, edgecolor=, **kwargs)
ax = plt.gca()
if inline:
plt.show()
else:
return ax
|
Generates a geographical data nullity heatmap, which shows the distribution of missing data across geographic
regions. The precise output depends on the inputs provided. If no geographical context is provided, a quadtree
is computed and nullities are rendered as abstract geographic squares. If geographical context is provided in the
form of a column of geographies (region, borough, ZIP code, etc.) in the `DataFrame`, convex hulls are computed
for each of the point groups and the heatmap is generated within them.
:param df: The DataFrame whose completeness is being geoplotted.
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None.
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param figsize: The size of the figure to display. This is a `matplotlib` parameter which defaults to `(25, 10)`.
:param x: The variable in the dataset containing the x-coordinates of the dataset.
:param y: The variable in the dataset containing the y-coordinates of the dataset.
:param by: If specified, plot in convex hull mode, using the given column to cluster points in the same area. If
not specified, plot in quadtree mode.
:param cmap: The colormap to display the data with. Defaults to `YlGn`.
:param inline: Whether or not the figure is inline. If it's not then instead of getting plotted, this method will
return its figure.
:param kwargs: Additional keyword arguments are passed to the underlying `geoplot` function.
:return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
|
27,138 |
def x_vs_y(collection_x, collection_y, title_x=None, title_y=None, width=43, filter_none=False):
data = merge_x_y(collection_x, collection_y, filter_none)
max_value = get_max_x_y(data)
bins_total = int(float(max_value) / width) + 1
if title_x is not None and title_y is not None:
headers = [title_x, title_y]
else:
headers = None
result = []
for item in sorted(data):
bins_x = int((float(data[item][]) / float(max_value)) * bins_total) + 1
bins_y = int((float(data[item][]) / float(max_value)) * bins_total) + 1
print(bins_x)
print(bins_y)
result.append([item, * bins_x, * bins_y])
result = to_smart_columns(result, headers=headers)
return result
|
Print a histogram with bins for x to the left and bins of y to the right
|
27,139 |
def get_addressbooks(self, order=None, append_remaining=True):
order = order or []
abooks = []
for a in order:
if a:
if a.abook:
abooks.append(a.abook)
if append_remaining:
for a in self._accounts:
if a.abook and a.abook not in abooks:
abooks.append(a.abook)
return abooks
|
returns list of all defined :class:`AddressBook` objects
|
27,140 |
def consult_response_hook(self, item_session: ItemSession) -> Actions:
try:
return self.hook_dispatcher.call(
PluginFunctions.handle_response, item_session
)
except HookDisconnected:
return Actions.NORMAL
|
Return scripting action when a response ends.
|
27,141 |
def get_telnet_template(auth, url, template_name=None):
f_url = url + "/imcrs/plat/res/telnet?start=0&size=10000&desc=false&total=false"
response = requests.get(f_url, auth=auth, headers=HEADERS)
try:
if response.status_code == 200:
telnet_templates = (json.loads(response.text))
template = None
if type(telnet_templates[]) is dict:
my_templates = [telnet_templates[]]
telnet_templates[] = my_templates
if template_name is None:
return telnet_templates[]
elif template_name is not None:
for telnet_template in telnet_templates[]:
if telnet_template[] == template_name:
template = [telnet_template]
print (type(template))
if template == None:
return 404
else:
return template
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " get_telnet_templates: An Error has occured"
|
Takes no input, or template_name as input, to issue a RESTful call to HP IMC
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param template_name: str value of template name
:return list object containing one or more dictionaries where each dictionary represents one
telnet template
:rtype list
|
27,142 |
def get_assessment_basic_authoring_session_for_bank(self, bank_id, proxy):
if not self.supports_assessment_basic_authoring():
raise errors.Unimplemented()
return sessions.AssessmentBasicAuthoringSession(bank_id, proxy, self._runtime)
|
Gets the ``OsidSession`` associated with the assessment authoring service for the given bank.
arg: bank_id (osid.id.Id): the ``Id`` of a bank
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentBasicAuthoringSession) - an
``AssessmentBasicAuthoringSession``
raise: NotFound - ``bank_id`` not found
raise: NullArgument - ``bank_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_assessment_basic_authoring()`` or
``supports_visibe_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_basic_authoring()`` and
``supports_visibe_federation()`` is ``true``.*
|
27,143 |
def get_clan_war(self, tag: crtag, **params: keys):
url = self.api.CLAN + '/' + tag + '/currentwar'
return self._get_model(url, **params)
|
Get information about a clan's current clan war
Parameters
----------
*tag: str
A valid clan tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
|
27,144 |
def _datatarget_defaults(args, default_args):
default_data = default_args.get("datatarget", [])
for x in default_args.get("toolplus", []):
val = None
if x == "data":
val = "gemini"
elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken", "gnomad"]:
val = x
if val and val not in default_data:
default_data.append(val)
new_val = getattr(args, "datatarget")
for x in default_data:
if x not in new_val:
new_val.append(x)
has_std_target = False
std_targets = ["variation", "rnaseq", "smallrna"]
for target in std_targets:
if target in new_val:
has_std_target = True
break
if not has_std_target:
new_val = new_val + std_targets
setattr(args, "datatarget", new_val)
return args
|
Set data installation targets, handling defaults.
Sets variation, rnaseq, smallrna as default targets if we're not
isolated to a single method.
Provides back compatibility for toolplus specifications.
|
27,145 |
def address(self):
if self.isDirect():
base36 = self._iban[4:]
asInt = int(base36, 36)
return to_checksum_address(pad_left_hex(baseN(asInt, 16), 20))
return ""
|
Should be called to get client direct address
@method address
@returns {String} client direct address
|
27,146 |
def _raise_on_error(data: Union[str, dict]) -> None:
if isinstance(data, str):
raise_error(data)
elif in data and data[] != :
raise_error(data[][])
|
Raise the appropriate exception on error.
|
27,147 |
def search(self, query):
results = self.skype.conn("GET", SkypeConnection.API_DIRECTORY,
auth=SkypeConnection.Auth.SkypeToken,
params={"searchstring": query, "requestId": "0"}).json().get("results", [])
return [SkypeUser.fromRaw(self.skype, json.get("nodeProfileData", {})) for json in results]
|
Search the Skype Directory for a user.
Args:
query (str): name to search for
Returns:
SkypeUser list: collection of possible results
|
27,148 |
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
axis = self._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return self.__constructor__(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
|
Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
|
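Since modin mirrors the pandas API, the cumulative-product semantics can be illustrated with pandas itself:

import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
print(df.cumprod())     # column-wise running product (axis=0 by default)
#    a  b
# 0  1  2
# 1  2  4
# 2  6  8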
27,149 |
def get_job_logs(id):
crawler_job = models.CrawlerJob.query.filter_by(id=id).one_or_none()
if crawler_job is None:
click.secho(
(
"CrawlJob %s was not found, maybe ityellowyellowLog',
)
|
Get the crawl logs from the job.
|
27,150 |
def majority_vote_byte_scan(relfilepath, fileslist, outpath, blocksize=65535, default_char_null=False):
t know if any of the entries are correct about this character).
relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).reads a string filepath, open the file
else:
fileshandles.append(open(filepath, ))
if hasattr(outpath, ):
outfile = outpath
else:
outpathfull = os.path.join(outpath, relfilepath)
pardir = os.path.dirname(outpathfull)
if not os.path.exists(pardir):
os.makedirs(pardir)
outfile = open(outpathfull, )
if fileshandles:
create_dir_if_not_exist(os.path.dirname(outpathfull))
buf = 1
while (buf):
buf = fileshandles[0].read()
outfile.write(buf)
outfile.flush()
return (1, "Error with file %s: only %i copies available, cannot vote (need at least 3)! Copied the first file from the first folder, verbatim." % (relfilepath, len(fileshandles)))
errors = []
entries = [1]*len(fileshandles)
while (entries.count() < len(fileshandles)):
final_entry = []
for i in xrange(len(fileshandles)):
entries[i] = fileshandles[i].read(blocksize)
if entries.count() == len(fileshandles):
break
elif len(entries) == 1:
final_entry = entries[0]
else:
for i in xrange(max(len(entry) for entry in entries)):
hist = {}
for entry in entries:
if len(hist) == 1:
final_entry.append(chr(int(hist.iterkeys().next())))
continue
elif len(hist) > 1:
skeys = sorted(hist, key=hist.get, reverse=True)
else:
final_entry.append(chr(int(skeys[0])))
continue
outfile.write(final_entry)
outfile.flush()
if errors:
error_msg = "Unrecoverable corruptions (because of ambiguity) in file %s on characters: %s." % (relfilepath, [hex(int(x)) for x in errors])
return (1, error_msg)
for fh in fileshandles:
fh.close()
if outfile != outpath:
outfile.flush()
outfile.close()
return (0, None)
|
Takes a list of files in string format representing the same data, and disambiguates by majority vote: for each position in the string, if the character is not the same across all entries, we keep the majority one. If there is none, it will be replaced by a null byte (because we can't know if any of the entries are correct about this character).
relfilepath is the filename or the relative file path relative to the parent directory (ie, this is the relative path so that we can compare the files from several directories).
|
27,151 |
def get_custom_layouts_active_label(self):
layout_active_label = Active_QLabel(self,
QPixmap(umbra.ui.common.get_resource_path(UiConstants.custom_layouts_icon)),
QPixmap(
umbra.ui.common.get_resource_path(
UiConstants.custom_layouts_hover_icon)),
QPixmap(umbra.ui.common.get_resource_path(
UiConstants.custom_layouts_active_icon)))
layout_active_label.setObjectName("Custom_Layouts_active_label")
self.__custom_layouts_menu = QMenu("Layouts", layout_active_label)
for layout in self.__user_layouts:
self.__container.layouts_manager.register_layout(layout.identity, layout)
self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
"Actions|Umbra|ToolBar|Layouts|Restore layout {0}".format(layout.name),
shortcut=layout.shortcut,
slot=functools.partial(self.__container.layouts_manager.restore_layout, layout.identity)))
self.__custom_layouts_menu.addSeparator()
for layout in self.__user_layouts:
self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
"Actions|Umbra|ToolBar|Layouts|Store layout {0}".format(layout.name),
shortcut=Qt.CTRL + layout.shortcut,
slot=functools.partial(self.__container.layouts_manager.store_layout, layout.identity)))
self.__custom_layouts_menu.addSeparator()
self.__custom_layouts_menu.addAction(self.__container.actions_manager.register_action(
"Actions|Umbra|ToolBar|Layouts|Toggle FullScreen",
shortcut=Qt.ControlModifier + Qt.SHIFT + Qt.Key_F,
slot=self.__container.toggle_full_screen))
layout_active_label.set_menu(self.__custom_layouts_menu)
return layout_active_label
|
Provides the default **Custom_Layouts_active_label** widget.
:return: Layout active label.
:rtype: Active_QLabel
|
27,152 |
def get_rpms(self):
tags = [
,
,
,
,
,
,
,
,
]
cmd = "/bin/rpm " + rpm_qf_args(tags)
try:
(status, output) = subprocess.getstatusoutput(cmd)
except AttributeError:
with open(, ) as devnull:
p = subprocess.Popen(cmd,
shell=True,
stdin=devnull,
stdout=subprocess.PIPE,
stderr=devnull)
(stdout, stderr) = p.communicate()
status = p.wait()
output = stdout.decode()
if status != 0:
self.log.debug("%s: stderr output: %s", cmd, stderr)
raise RuntimeError("%s: exit code %s" % (cmd, status))
return parse_rpm_output(output.splitlines(), tags)
|
Build a list of installed RPMs in the format required for the
metadata.
|
27,153 |
def focus(self):
focus_msg = FocusSignalMsg(None, self._focus)
self._focus = None
self.focus_signal.emit(focus_msg)
|
Unsets the focused element
|
27,154 |
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs):
    util.valid_audio(y, mono=False)
    if orig_sr == target_sr:
        return y
    ratio = float(target_sr) / orig_sr
    n_samples = int(np.ceil(y.shape[-1] * ratio))
    if res_type in ('scipy', 'fft'):
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)
    elif res_type == 'polyphase':
        if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
            raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')
        orig_sr = int(orig_sr)
        target_sr = int(target_sr)
        gcd = np.gcd(orig_sr, target_sr)
        y_hat = scipy.signal.resample_poly(y, target_sr // gcd, orig_sr // gcd, axis=-1)
else:
y_hat = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)
if fix:
y_hat = util.fix_length(y_hat, n_samples, **kwargs)
if scale:
y_hat /= np.sqrt(ratio)
return np.ascontiguousarray(y_hat, dtype=y.dtype)
|
Resample a time series from orig_sr to target_sr
Parameters
----------
y : np.ndarray [shape=(n,) or shape=(2, n)]
audio time series. Can be mono or stereo.
orig_sr : number > 0 [scalar]
original sampling rate of `y`
target_sr : number > 0 [scalar]
target sampling rate
res_type : str
resample type (see note)
.. note::
By default, this uses `resampy`'s high-quality mode ('kaiser_best').
To use a faster method, set `res_type='kaiser_fast'`.
To use `scipy.signal.resample`, set `res_type='fft'` or `res_type='scipy'`.
To use `scipy.signal.resample_poly`, set `res_type='polyphase'`.
.. note::
When using `res_type='polyphase'`, only integer sampling rates are
supported.
fix : bool
adjust the length of the resampled signal to be of size exactly
`ceil(target_sr * len(y) / orig_sr)`
scale : bool
Scale the resampled signal so that `y` and `y_hat` have approximately
equal total energy.
kwargs : additional keyword arguments
If `fix==True`, additional keyword arguments to pass to
`librosa.util.fix_length`.
Returns
-------
y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
`y` resampled from `orig_sr` to `target_sr`
Raises
------
ParameterError
If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both
integer-valued.
See Also
--------
librosa.util.fix_length
scipy.signal.resample
resampy.resample
Notes
-----
This function caches at level 20.
Examples
--------
Downsample from 22 KHz to 8 KHz
>>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050)
>>> y_8k = librosa.resample(y, sr, 8000)
>>> y.shape, y_8k.shape
((1355168,), (491671,))
|
27,155 |
def keep_max_priority_effects(effects):
    # materialize the priorities: a bare map() generator would be exhausted
    # by max() before zip() could reuse it
    priority_values = list(map(effect_priority, effects))
    max_priority = max(priority_values)
    return [e for (e, p) in zip(effects, priority_values) if p == max_priority]
|
Given a list of effects, only keep the ones with the maximum priority
effect type.
Parameters
----------
effects : list of MutationEffect subclasses
Returns list of same length or shorter
|
27,156 |
def cache_subdirectory(
reference_name=None,
annotation_name=None,
annotation_version=None):
if reference_name is None:
reference_name = ""
if annotation_name is None:
annotation_name = ""
if annotation_version is None:
annotation_version = ""
reference_dir = join(CACHE_BASE_SUBDIR, reference_name)
annotation_dir = "%s%s" % (annotation_name, annotation_version)
return join(reference_dir, annotation_dir)
|
Which cache subdirectory to use for a given annotation database
over a particular reference. All arguments can be omitted to just get
the base subdirectory for all pyensembl cached datasets.
|
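An illustrative call, grounded in the string handling above (CACHE_BASE_SUBDIR is a module-level constant not shown here; the reference and annotation names are examples):

subdir = cache_subdirectory(reference_name="GRCh38",
                            annotation_name="ensembl",
                            annotation_version=95)
# equivalent to join(CACHE_BASE_SUBDIR, "GRCh38", "ensembl95")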
27,157 |
def get_export_files(cursor, id, version, types, exports_dirs, read_file=True):
request = get_current_request()
type_info = dict(request.registry.settings[])
metadata = get_content_metadata(id, version, cursor)
legacy_id = metadata[]
legacy_version = metadata[]
reachable_dirs = [dir for dir in exports_dirs if safe_stat(dir)]
results = []
for type in list(types):
if type not in type_info:
raise ExportError("invalid type requested.".format(type))
file_extension = type_info[type][]
if metadata[] == MODULE_MIMETYPE and \
file_extension == :
continue
mimetype = type_info[type][]
filename = .format(id, version, file_extension)
legacy_filenames = [
.format(legacy_id, legacy_version, ext)
for ext in LEGACY_EXTENSION_MAP[file_extension]
]
slugify_title_filename = u.format(slugify(metadata[]),
version, file_extension)
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
try:
if read_file:
with open(filepath, ) as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(filepath)
contents = None
modtime = fromtimestamp(int(stats.st_mtime))
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, , contents))
break
except EnvironmentError:
pass
else:
legacy_file_found = False
for dir in reachable_dirs:
filepath = os.path.join(dir, filename)
legacy_filepaths = [os.path.join(dir, fn)
for fn in legacy_filenames]
for legacy_filepath in legacy_filepaths:
try:
if read_file:
with open(legacy_filepath, ) as file:
stats = os.fstat(file.fileno())
contents = file.read()
else:
stats = os.stat(legacy_filepath)
contents = None
modtime = fromtimestamp(stats.st_mtime)
os.link(legacy_filepath, filepath)
results.append((slugify_title_filename, mimetype,
stats.st_size, modtime, ,
contents))
legacy_file_found = True
break
except EnvironmentError:
pass
if legacy_file_found:
break
else:
filenames = [filename] + legacy_filenames
log_formatted_filenames = .join([.format(x)
for x in filenames])
logger.error("Could not find a file for at version "
"with any of the following file names:\n{}"
.format(id, version, log_formatted_filenames))
results.append((slugify_title_filename, mimetype,
0, None, , None))
return results
|
Retrieve files associated with document.
|
27,158 |
def load(self, content):
try:
config = yaml.load(content, Loader=self._loader(self._global_tags))
except yaml.YAMLError:
raise InvalidConfigError(_("Config is not valid yaml."))
try:
config = config[self.tool]
except (TypeError, KeyError):
return None
if not isinstance(config, dict):
config = self._apply_default(config, self._global_default)
else:
scoped_keys = set(key for key in self._scopes)
for key in config:
if key in scoped_keys:
tags, default = self._scopes[key]
if not default:
default = self._global_default
config[key] = self._apply_default(config[key], default)
self._apply_scope(config[key], tags)
else:
config[key] = self._apply_default(config[key], self._global_default)
self._validate(config)
return config
|
Parse yaml content.
|
27,159 |
def apply_mask(self, mask_img):
self.set_mask(mask_img)
return self.get_data(masked=True, smoothed=True, safe_copy=True)
|
First set_mask and then get_masked_data.
Parameters
----------
mask_img: nifti-like image, NeuroImage or str
3D mask array: True where a voxel should be used.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
The masked data deepcopied
|
27,160 |
def strip(self, chars=None):
return self.__class__(
self._str_strip(, chars),
no_closing=chars and (closing_code in chars),
)
|
Like str.strip, except it returns the Colr instance.
|
27,161 |
def SMS_me(self, body):
logging.debug()
return self.text(self._credentials['PERSONAL_PHONE_NUMBER'], body)
|
Quickly send an SMS to yourself. Calls :py:meth:`SMS`.
* *stored credential name: PERSONAL_PHONE_NUMBER*
:param string body: The content of the SMS message.
|
27,162 |
def _add_timedeltalike_scalar(self, other):
    if isna(other):
        new_values = np.empty(len(self), dtype='i8')
        new_values[:] = iNaT
        return new_values
    inc = delta_to_nanoseconds(other)
    new_values = checked_add_with_arr(self.asi8, inc,
                                      arr_mask=self._isnan).view('i8')
    new_values = self._maybe_mask_results(new_values)
    return new_values.view('i8')
|
Add a delta of a timedeltalike
return the i8 result view
|
27,163 |
def set_U(self, U):
self.Ubg = np.asarray(U)[np.newaxis,...]
|
Set background zonal flow
|
27,164 |
def moving_average(data, xcol, ycol, width):
out = []
try:
for i in range(len(data)):
n = 0
total = 0
for j in range(i - width, i + width + 1):
if j >= 0 and j < len(data):
total += data[j][ycol]
n += 1
out.append((data[i][xcol], float(total) / n))
except IndexError:
raise IndexError("bad data: %s,xcol=%d,ycol=%d,width=%d" % (data, xcol, ycol, width))
return out
|
Compute the moving average of YCOL'th column of each sample point
in DATA. In particular, for each element I in DATA,
this function extracts up to WIDTH*2+1 elements, consisting of
I itself, WIDTH elements before I, and WIDTH
elements after I. It then computes the mean of the YCOL'th
column of these elements, and it composes a two-element sample
consisting of XCOL'th element and the mean.
>>> data = [[10,20], [20,30], [30,50], [40,70], [50,5]]
... chart_data.moving_average(data, 0, 1, 1)
[(10, 25.0), (20, 33.333333333333336), (30, 50.0), (40, 41.666666666666664), (50, 37.5)]
The above value actually represents:
[(10, (20+30)/2), (20, (20+30+50)/3), (30, (30+50+70)/3),
(40, (50+70+5)/3), (50, (70+5)/2)]
|
27,165 |
def scale(self, by):
self.points *= np.asarray([by])
self._adjust_ports()
|
Scale the points in the Pattern.
Parameters
----------
by : float or np.ndarray, shape=(3,)
The factor to scale by. If a scalar, scale all directions isotropically.
If np.ndarray, scale each direction independently.
|
27,166 |
def is_token_valid(self, token):
try:
_tinfo = self.handler.info(token)
except KeyError:
return False
if is_expired(int(_tinfo[])) or _tinfo[]:
return False
session_info = self[_tinfo[]]
if session_info["oauth_state"] == "authz":
if _tinfo[] != self.handler[]:
return False
elif session_info["oauth_state"] == "token":
if _tinfo[] != self.handler[]:
return False
return True
|
Checks validity of a given token
:param token: Access or refresh token
|
27,167 |
def _reset_server_state(self) -> None:
self.last_helo_response = None
self._last_ehlo_response = None
self.esmtp_extensions = {}
self.supports_esmtp = False
self.server_auth_methods = []
|
Clear stored information about the server.
|
27,168 |
def _check_timers(self):
if self._timer_queue:
timer = self._timer_queue[0]
if timer[] < _current_time_millis():
self._timer_queue.pop(0)
self._logger.debug(.format(timer[], timer[].id))
self._add_event(timer[], [], {}, timer[], front=True)
else:
self._next_timeout = (
timer[] - _current_time_millis()) / 1000
if self._next_timeout < 0:
self._next_timeout = 0
else:
self._next_timeout = None
|
Check for expired timers.
If there are any timers that expired, place them in the event
queue.
|
27,169 |
def group_by(fn: Callable[[T], TR]):
def inner(seq: ActualIterable[T]) -> Dict[TR, List[T]]:
ret = defaultdict(list)
for each in seq:
ret[fn(each)].append(each)
return ret
return inner
|
>>> from Redy.Collections import Flow, Traversal
>>> x = [1, '1', 1.0]
>>> Flow(x)[Traversal.group_by(type)]
|
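Calling the returned closure directly shows the grouping behaviour (result reconstructed from the code above; the actual return type is a defaultdict(list)):

grouped = group_by(type)([1, '1', 1.0])
# {<class 'int'>: [1], <class 'str'>: ['1'], <class 'float'>: [1.0]}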
27,170 |
def prepare_files(self, finder):
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
self._walk_req_to_install(
functools.partial(self._prepare_file, finder))
|
Prepare process. Create temp directories, download and/or unpack files.
|
27,171 |
def cli_auth(context):
with context.io_manager.with_stdout() as fp:
with context.client_manager.with_client() as client:
info = []
client.auth()
if getattr(client, , None):
info.append((, client.auth_cache_path))
if getattr(client, , None):
info.append((, client.auth_url))
if getattr(client, , None):
info.append((, client.auth_user))
if getattr(client, , None):
info.append((, client.auth_key))
if getattr(client, , None):
info.append((, client.auth_tenant))
if getattr(client, , None):
info.append((, client.auth_methods))
if getattr(client, , None):
info.append((, client.storage_path))
if getattr(client, , None):
info.append((, client.cdn_path))
if getattr(client, , None):
info.append((, client.local_path))
if getattr(client, , None):
info.append((, .join(client.regions)))
if getattr(client, , None):
info.append((, client.default_region))
if getattr(client, , None):
info.append((, client.region))
if getattr(client, , None):
info.append((, client.snet))
if getattr(client, , None):
info.append((, client.storage_url))
if getattr(client, , None):
info.append((, client.cdn_url))
if getattr(client, , None):
info.append((, client.auth_token))
if not info:
info.append((
,
))
fmt = % (max(len(t) for t, v in info) + 1)
for t, v in info:
fp.write(fmt % (t + , v))
fp.flush()
|
Authenticates and then outputs the resulting information.
See :py:mod:`swiftly.cli.auth` for context usage information.
See :py:class:`CLIAuth` for more information.
|
27,172 |
def extract_lzma(archive, compression, cmd, verbosity, interactive, outdir):
    return _extract(archive, compression, cmd, 'alone', verbosity, outdir)
|
Extract an LZMA archive with the lzma Python module.
|
27,173 |
def modifie_options(self, field_option, value):
options = dict(self["options"] or {}, **{field_option: value})
self.modifications["options"] = options
|
Set options in modifications.
All options are stored at once, since they are grouped together in the DB.
|
27,174 |
def add_tunnel_interface(self, interface_id, address, network_value,
zone_ref=None, comment=None):
        interfaces = [{'nodes': [{'address': address, 'network_value': network_value}]}]
        interface = {'interface_id': interface_id, 'interfaces': interfaces,
                     'zone_ref': zone_ref, 'comment': comment}
tunnel_interface = TunnelInterface(**interface)
self._engine.add_interface(tunnel_interface)
|
Creates a tunnel interface for a virtual engine.
:param str,int interface_id: the tunnel id for the interface, used as nicid also
:param str address: ip address of interface
:param str network_value: network cidr for interface; format: 1.1.1.0/24
:param str zone_ref: zone reference for interface can be name, href or Zone
:raises EngineCommandFailed: failure during creation
:return: None
|
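A hedged usage sketch; `virtual_engine` is a hypothetical object exposing the method above, and all values are illustrative:
virtual_engine.add_tunnel_interface(
    interface_id=1000,
    address='1.1.1.1',
    network_value='1.1.1.0/24',
    zone_ref='tunnel-zone',            # optional: name, href or Zone
    comment='VPN tunnel endpoint')     # optional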
27,175 |
def get_sstable_data_files(self, ks, table):
p = self.get_sstable_data_files_process(ks=ks, table=table)
        out, _, _ = handle_external_tool_process(p, ["sstableutil", '--type', 'final', ks, table])
        return sorted(filter(lambda s: s.endswith('-Data.db'), out.splitlines()))
|
Read sstable data files by using sstableutil, so we ignore temporary files
|
27,176 |
def _run_hooks(config, hooks, args, environ):
skips = _get_skips(environ)
cols = _compute_cols(hooks, args.verbose)
filenames = _all_filenames(args)
filenames = filter_by_include_exclude(filenames, , config[])
classifier = Classifier(filenames)
retval = 0
for hook in hooks:
retval |= _run_single_hook(classifier, hook, args, skips, cols)
        if retval and config['fail_fast']:
break
if retval and args.show_diff_on_failure and git.has_diff():
if args.all_files:
output.write_line(
,
)
output.write_line()
        subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
return retval
|
Actually run the hooks.
|
27,177 |
def prevnext(resource):
_prev, _next = False, False
resource = xmlparser(resource)
prevnext = resource.xpath("//ti:prevnext", namespaces=XPATH_NAMESPACES)
if len(prevnext) > 0:
_next, _prev = None, None
prevnext = prevnext[0]
_next_xpath = prevnext.xpath("ti:next/ti:urn/text()", namespaces=XPATH_NAMESPACES, smart_strings=False)
_prev_xpath = prevnext.xpath("ti:prev/ti:urn/text()", namespaces=XPATH_NAMESPACES, smart_strings=False)
if len(_next_xpath):
_next = _next_xpath[0].split(":")[-1]
if len(_prev_xpath):
_prev = _prev_xpath[0].split(":")[-1]
return _prev, _next
|
Parse a resource to get the prev and next urn
:param resource: XML Resource
:type resource: etree._Element
:return: Tuple representing previous and next urn
:rtype: (str, str)
|
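An illustrative call; `xml_reply` is a hypothetical GetPrevNextUrn-style XML response accepted by xmlparser:
prev_ref, next_ref = prevnext(xml_reply)
# e.g. prev_ref == '1.1' and next_ref == '1.3' when the current passage is reference 1.2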
27,178 |
def display_latex(*objs, **kwargs):
    raw = kwargs.pop('raw', False)
if raw:
for obj in objs:
publish_latex(obj)
else:
        display(*objs, include=['text/plain', 'text/latex'])
|
Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
|
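A usage sketch inside an IPython/Jupyter session; the LaTeX string and the `obj` variable are illustrative:
display_latex(r'$\frac{a}{b}$', raw=True)   # publish the raw LaTeX source
display_latex(obj)                          # let obj's LaTeX formatter render it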
27,179 |
def nearby(word):
w = any2unicode(word)
if w in _cache_nearby: return _cache_nearby[w]
words, scores = [], []
try:
for x in _vectors.neighbours(w):
words.append(x[0])
scores.append(x[1])
except: pass
_cache_nearby[w] = (words, scores)
return words, scores
|
Nearby word
|
27,180 |
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3,
probe_rad=0.1):
    with ScratchDir('.'):
name = "temp_zeo"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_file = None
if rad_dict:
rad_file = name + ".rad"
            with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
fp.write("{0} {1}".format(el, rad_dict[el]))
atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
vol_str = volume(atmnet, 0.3, probe_rad, 10000)
sa_str = surface_area(atmnet, 0.3, probe_rad, 10000)
vol = None
sa = None
for line in vol_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
vol = -1.0
break
if float(fields[1]) == 0:
vol = -1.0
break
vol = float(fields[3])
for line in sa_str.split("\n"):
if "Number_of_pockets" in line:
fields = line.split()
if float(fields[1]) > 1:
sa = -1.0
break
if float(fields[1]) == 0:
sa = -1.0
break
sa = float(fields[3])
if not vol or not sa:
raise ValueError("Error in zeo++ output stream")
return vol, sa
|
Computes the volume and surface area of isolated void using Zeo++.
Useful to compute the volume and surface area of vacant site.
Args:
structure: pymatgen Structure containing vacancy
rad_dict(optional): Dictionary with short name of elements and their
radii.
chan_rad(optional): Minimum channel Radius.
probe_rad(optional): Probe radius for Monte Carlo sampling.
Returns:
volume: floating number representing the volume of void
|
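A hedged usage sketch; `defect_structure` is a hypothetical pymatgen Structure containing a vacancy, and a working Zeo++ installation is assumed:
vol, sa = get_void_volume_surfarea(defect_structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1)
# Both values come back as -1.0 when Zeo++ reports zero or more than one pocket.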
27,181 |
def create_object(container, portal_type, **data):
if "id" in data:
id = data.pop("id")
logger.warn("Passed in ID omitted! Senaite LIMS "
"generates a proper ID for you" .format(id))
try:
if portal_type == "AnalysisRequest":
obj = create_analysisrequest(container, **data)
data = u.omit(data, "SampleType", "Analyses")
data["Client"] = container
else:
obj = api.create(container, portal_type)
except Unauthorized:
fail(401, "You are not allowed to create this content")
try:
update_object_with_data(obj, data)
except APIError:
container.manage_delObjects(obj.id)
raise
return obj
|
Creates an object slug
:returns: The new created content object
:rtype: object
|
27,182 |
def _validate_item(self, item):
if not isinstance(item, six.string_types):
raise ValidationError(
self._record,
"Text list field items must be strings, not ".format(item.__class__)
)
words = item.split()
item_length_type = self._field.field_definition.get()
item_max_length = self._field.field_definition.get()
item_min_length = self._field.field_definition.get()
if item_length_type is not None:
            if item_length_type == 'words':
if item_max_length is not None:
if len(words) > item_max_length:
raise ValidationError(
self._record,
"Field items cannot contain more than {} words".format(
self._field.name,
item_max_length
)
)
if item_min_length is not None:
if len(words) < item_min_length:
raise ValidationError(
self._record,
"Field items must contain at least {} words".format(
self._field.name,
item_min_length
)
)
else:
if item_max_length is not None:
if len(item) > item_max_length:
raise ValidationError(
self._record,
"Field items cannot contain more than {} characters".format(
self._field.name,
item_max_length
)
)
if item_min_length is not None:
if len(item) < item_min_length:
raise ValidationError(
self._record,
"Field items must contain at least {} characters".format(
self._field.name,
item_min_length
)
)
|
Validate char/word count
|
27,183 |
def _count_inversions(a, b):
a, a_counts = np.unique(a, return_counts=True)
b, b_counts = np.unique(b, return_counts=True)
inversions = 0
i = 0
j = 0
while i < len(a) and j < len(b):
if a[i] < b[j]:
i += 1
elif a[i] >= b[j]:
inversions += np.sum(a_counts[i:]) * b_counts[j]
j += 1
return inversions
|
Count the number of inversions in two numpy arrays:
# points i, j where a[i] >= b[j]
Parameters
----------
a, b : np.ndarray, shape=(n,) (m,)
The arrays to be compared.
This implementation is optimized for arrays with many
repeated values.
Returns
-------
inversions : int
The number of detected inversions
|
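A small worked example of the inversion count (pairs i, j with a[i] >= b[j]):
import numpy as np
a = np.array([1, 3, 3])
b = np.array([2, 3])
# qualifying pairs: 3>=2 (twice) and 3>=3 (twice), so 4 inversions
assert _count_inversions(a, b) == 4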
27,184 |
def reset(self, path, pretend=False):
self._notes = []
migrations = sorted(self._repository.get_ran(), reverse=True)
count = len(migrations)
if count == 0:
self._note("<info>Nothing to rollback.</info>")
else:
for migration in migrations:
self._run_down(path, {"migration": migration}, pretend)
return count
|
Rolls all of the currently applied migrations back.
:param path: The path
:type path: str
:param pretend: Whether we execute the migrations as dry-run
:type pretend: bool
:rtype: count
|
27,185 |
def forwards(self, orm):
"Write your forwards methods here."
orm.Ulica.objects.filter(stan_na__exact=u_akt_stan).update(aktywny=True)
orm.Ulica.objects.exclude(stan_na__exact=u_akt_stan).update(aktywny=False)
|
Write your forwards methods here.
|
27,186 |
def _format_lines(self, tokensource):
nocls = self.noclasses
lsep = self.lineseparator
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
        lspan = ''
        line = []
        for ttype, value in tokensource:
            if nocls:
                cclass = getcls(ttype)
                while cclass is None:
                    ttype = ttype.parent
                    cclass = getcls(ttype)
                cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
            else:
                cls = self._get_css_classes(ttype)
                cspan = cls and '<span class="%s">' % cls or ''

            parts = value.translate(escape_table).split('\n')

            if tagsfile and ttype in Token.Name:
                filename, linenumber = self._lookup_ctag(value)
                if linenumber:
                    base, filename = os.path.split(filename)
                    if base:
                        base += '/'
                    filename, extension = os.path.splitext(filename)
                    url = self.tagurlformat % {'path': base, 'fname': filename,
                                               'fext': extension}
                    parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                        (url, self.lineanchors, linenumber, parts[0])
                    parts[-1] = parts[-1] + "</a>"

            # for all but the last line
            for part in parts[:-1]:
                if line:
                    if lspan != cspan:
                        line.extend(((lspan and '</span>'), cspan, part,
                                     (cspan and '</span>'), lsep))
                    else:  # both spans are the same
                        line.extend((part, (lspan and '</span>'), lsep))
                    yield 1, ''.join(line)
                    line = []
                elif part:
                    yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
                else:
                    yield 1, lsep

            # for the last line
            if line and parts[-1]:
                if lspan != cspan:
                    line.extend(((lspan and '</span>'), cspan, parts[-1]))
                    lspan = cspan
                else:
                    line.append(parts[-1])
            elif parts[-1]:
                line = [cspan, parts[-1]]
                lspan = cspan
            # else: nothing to open and lspan stays unchanged

        if line:
            line.extend(((lspan and '</span>'), lsep))
            yield 1, ''.join(line)
|
Just format the tokens, without any wrapping tags.
Yield individual lines.
|
27,187 |
def iter_notifications(self, all=False, participating=False, since=None,
number=-1, etag=None):
        url = self._build_url('notifications', base_url=self._api)
        params = {
            'all': all,
            'participating': participating,
            'since': timestamp_parameter(since)
        }
for (k, v) in list(params.items()):
if not v:
del params[k]
return self._iter(int(number), url, Thread, params, etag)
|
Iterates over the notifications for this repository.
:param bool all: (optional), show all notifications, including ones
marked as read
:param bool participating: (optional), show only the notifications the
user is participating in directly
:param since: (optional), filters out any notifications updated
before the given time. This can be a `datetime` or an `ISO8601`
formatted date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Thread <github3.notifications.Thread>`
|
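An illustrative iteration; `repo` is a hypothetical github3 Repository instance:
for thread in repo.iter_notifications(participating=True, number=10):
    print(thread.subject)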
27,188 |
def print_exception(etype, value, tb, limit=None, file=None):
if file is None:
|
Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
|
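A typical usage sketch mirroring the standard-library traceback API:
import sys
try:
    1 / 0
except ZeroDivisionError:
    etype, value, tb = sys.exc_info()
    print_exception(etype, value, tb, limit=2, file=sys.stderr)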
27,189 |
def set_custom(sld, tld, nameservers):
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setCustom')
    opts['SLD'] = sld
    opts['TLD'] = tld
    opts['Nameservers'] = ','.join(nameservers)
    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return False
    dnsresult = response_xml.getElementsByTagName('DomainDNSSetCustomResult')[0]
    return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('Updated'))
|
Sets domain to use custom DNS servers.
returns True if the custom nameservers were set successfully
sld
SLD of the domain name
tld
TLD of the domain name
nameservers
array of strings List of nameservers to be associated with this domain
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains_dns.set_custom sld tld nameserver
|
27,190 |
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
global scm_status_cache
if config.artifact in scm_status_cache.keys():
result = scm_status_cache[config.artifact]
elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
result = scm_status_cache["%s|False" % config.artifact]
else:
result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
if read_modules:
scm_status_cache[config.artifact] = result
if ("%s|False" % config.artifact) in scm_status_cache.keys():
del(scm_status_cache["%s|False" % config.artifact])
else:
scm_status_cache["%s|False" % config.artifact] = result
return result
|
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven
|
27,191 |
def searchZone(self, zone, q=None, has_geo=False, callback=None, errback=None):
import ns1.zones
return zone.search(q, has_geo, callback=callback, errback=errback)
|
Search a zone for a given search query (e.g., for geological data, etc)
:param zone: NOT a string like loadZone - an already loaded ns1.zones.Zone, like one returned from loadZone
:return:
|
27,192 |
def lbrt(self):
return self._left, self._bottom, self._right, self._top
|
Return (left,bottom,right,top) as a tuple.
|
27,193 |
def annulus(script, radius=None, radius1=None, radius2=None, diameter=None,
diameter1=None, diameter2=None, cir_segments=32, color=None):
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1
if radius2 is None:
radius2 = 0
filter_xml = .join([
,
,
% radius1,
,
,
,
,
% radius2,
,
,
,
,
% cir_segments,
,
,
,
])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer(, change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
Create a 2D (surface) circle or annulus
radius1=1 # Outer radius of the circle
radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)
color="" # specify a color name to apply vertex colors to the newly created mesh
OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius
|
27,194 |
def _is_nmrstar(string):
if (string[0:5] == u"data_" and u"save_" in string) or (string[0:5] == b"data_" and b"save_" in string):
return string
return False
|
Test if input string is in NMR-STAR format.
:param string: Input string.
:type string: :py:class:`str` or :py:class:`bytes`
:return: Input string if in NMR-STAR format or False otherwise.
:rtype: :py:class:`str` or :py:obj:`False`
|
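Two quick checks of the format sniffing above (inputs are illustrative):
assert _is_nmrstar('data_15000\nsave_entry_information\n')   # truthy: the input string is returned
assert _is_nmrstar('{"not": "nmr-star"}') is False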
27,195 |
def _get_config_fname():
directory = _get_vispy_app_dir()
if directory is None:
return None
    fname = op.join(directory, 'vispy.json')
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        fname = op.join(_TempDir(), 'vispy.json')
return fname
|
Helper for the vispy config file
|
27,196 |
def current_site_url():
protocol = getattr(settings, , )
port = getattr(settings, , )
    url = '%s://%s' % (protocol, settings.SITE_DOMAIN)
    if port:
        url += ':%s' % port
return url
|
Returns fully qualified URL (no trailing slash) for the current site.
|
27,197 |
def rej(vec, vec_onto):
import numpy as np
rej_vec = vec - proj(vec, vec_onto)
return rej_vec
|
Vector rejection.
Calculated by subtracting from `vec` the projection of `vec` onto
`vec_onto`:
.. math::
\\mathsf{vec} - \\mathrm{proj}\\left(\\mathsf{vec},
\\ \\mathsf{vec\\_onto}\\right)
Parameters
----------
vec
length-R |npfloat_| --
Vector to reject
vec_onto
length-R |npfloat_| --
Vector onto which `vec` is to be rejected
Returns
-------
rej_vec
length-R |npfloat_| --
Rejection of `vec` onto `vec_onto`
|
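A short numeric check of the rejection; it assumes the companion proj() helper from the same module is available:
import numpy as np
v = np.array([3.0, 4.0, 0.0])
onto = np.array([1.0, 0.0, 0.0])
# proj(v, onto) == [3, 0, 0], so only the component orthogonal to `onto` remains.
print(rej(v, onto))   # -> [0. 4. 0.]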
27,198 |
async def set_as_boot_disk(self):
await self._handler.set_boot_disk(
system_id=self.node.system_id, id=self.id)
|
Set as boot disk for this node.
|
27,199 |
def is_network_source_fw(cls, nwk, nwk_name):
if nwk is not None:
if nwk.source == fw_const.FW_CONST:
return True
return False
if nwk_name in fw_const.DUMMY_SERVICE_NWK and (
len(nwk_name) == len(fw_const.DUMMY_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
if nwk_name in fw_const.IN_SERVICE_NWK and (
len(nwk_name) == len(fw_const.IN_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
if nwk_name in fw_const.OUT_SERVICE_NWK and (
len(nwk_name) == len(fw_const.OUT_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
return False
|
Check if SOURCE is FIREWALL, if yes return TRUE.
If source is None or entry not in NWK DB, check from Name.
Name should have constant AND length should match.
|