Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
4,300 | def weighted_mean_and_std(values, weights):
average = np.average(values, weights=weights, axis=0)
variance = np.dot(weights, (values - average) ** 2) / weights.sum()
return (average, np.sqrt(variance)) | Returns the weighted average and standard deviation.
values, weights -- numpy ndarrays with the same shape. |
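A minimal usage sketch for the row above, assuming the function is importable and `np` is `numpy`:

```python
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1.0, 1.0, 1.0, 5.0])

avg, std = weighted_mean_and_std(values, weights)
print(avg)  # 3.25, the weighted average
print(std)  # ~1.09, the square root of the weighted variance around that average
```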
4,301 | def compute_plot_size(plot):
if isinstance(plot, GridBox):
ndmapping = NdMapping({(x, y): fig for fig, y, x in plot.children}, kdims=[, ])
cols = ndmapping.groupby()
rows = ndmapping.groupby()
width = sum([max([compute_plot_size(f)[0] for f in col]) for col in cols])
height = sum([max([compute_plot_size(f)[1] for f in row]) for row in rows])
return width, height
elif isinstance(plot, (Div, ToolbarBox)):
return 0, 0
elif isinstance(plot, (Row, Column, WidgetBox, Tabs)):
if not plot.children: return 0, 0
if isinstance(plot, Row) or (isinstance(plot, ToolbarBox) and plot.toolbar_location not in [, ]):
w_agg, h_agg = (np.sum, np.max)
elif isinstance(plot, Tabs):
w_agg, h_agg = (np.max, np.max)
else:
w_agg, h_agg = (np.max, np.sum)
widths, heights = zip(*[compute_plot_size(child) for child in plot.children])
return w_agg(widths), h_agg(heights)
elif isinstance(plot, (Figure, Chart)):
if plot.plot_width:
width = plot.plot_width
else:
width = plot.frame_width + plot.min_border_right + plot.min_border_left
if plot.plot_height:
height = plot.plot_height
else:
height = plot.frame_height + plot.min_border_bottom + plot.min_border_top
return width, height
elif isinstance(plot, (Plot, DataTable, Spacer)):
return plot.width, plot.height
else:
return 0, 0 | Computes the size of bokeh models that make up a layout such as
figures, rows, columns, widgetboxes and Plot. |
4,302 | def create_protocol(self):
self.sessions += 1
protocol = self.protocol_factory(self)
protocol.copy_many_times_events(self)
return protocol | Create a new protocol via the :attr:`protocol_factory`
This method increases the count of :attr:`sessions` and builds
the protocol, passing ``self`` as the producer. |
4,303 | def integrate(self, wavelengths=None, **kwargs):
if in kwargs:
self._validate_flux_unit(kwargs[], wav_only=True)
x = self._validate_wavelengths(wavelengths)
try:
m = self.model.integral
except (AttributeError, NotImplementedError):
if conf.default_integrator == :
y = self(x, **kwargs)
result = abs(np.trapz(y.value, x=x.value))
result_unit = y.unit
else:
raise NotImplementedError(
.format(conf.default_integrator))
else:
start = x[0].value
stop = x[-1].value
result = (m(stop) - m(start))
result_unit = self._internal_flux_unit
if result_unit != units.THROUGHPUT:
if result_unit == units.PHOTLAM:
result_unit = u.photon / (u.cm**2 * u.s)
elif result_unit == units.FLAM:
result_unit = u.erg / (u.cm**2 * u.s)
else:
raise NotImplementedError(
.format(result_unit))
else:
result_unit *= self._internal_wave_unit
return result * result_unit | Perform integration.
This uses any analytical integral that the
underlying model has (i.e., ``self.model.integral``).
If unavailable, it uses the default fall-back integrator
set in the ``default_integrator`` configuration item.
If wavelengths are provided, flux or throughput is first resampled.
This is useful when the user wants to integrate at specific end points
or use custom spacing; in that case, the user can pass in a desired
sampling array generated with :func:`numpy.linspace`,
:func:`numpy.logspace`, etc.
If not provided, then `waveset` is used.
Parameters
----------
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for integration.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
kwargs : dict
Optional keywords to ``__call__`` for sampling.
Returns
-------
result : `~astropy.units.quantity.Quantity`
Integrated result.
Raises
------
NotImplementedError
Invalid default integrator.
synphot.exceptions.SynphotError
`waveset` is needed but undefined or cannot integrate
natively in the given ``flux_unit``. |
4,304 | async def get_guild_count(self, bot_id: int=None):
if bot_id is None:
bot_id = self.bot_id
return await self.http.get_guild_count(bot_id) | This function is a coroutine.
Gets a guild count from discordbots.org
Parameters
==========
bot_id: int[Optional]
The bot_id of the bot you want to look up.
Defaults to the Bot provided in Client init
Returns
=======
stats: dict
The guild count and shards of a bot.
The date object is returned as a datetime.datetime object |
4,305 | def pr0_to_likelihood_array(outcomes, pr0):
pr0 = pr0[np.newaxis, ...]
pr1 = 1 - pr0
if len(np.shape(outcomes)) == 0:
outcomes = np.array(outcomes)[None]
return np.concatenate([
pr0 if outcomes[idx] == 0 else pr1
for idx in range(safe_shape(outcomes))
]) | Assuming a two-outcome measurement with probabilities given by the
array ``pr0``, returns an array of the form expected to be returned by
the ``likelihood`` method.
:param numpy.ndarray outcomes: Array of integers indexing outcomes.
:param numpy.ndarray pr0: Array of shape ``(n_models, n_experiments)``
describing the probability of obtaining outcome ``0`` from each
set of model parameters and experiment parameters. |
4,306 | def search_grouping(stmt, name):
mod = stmt.i_orig_module
while stmt is not None:
if name in stmt.i_groupings:
g = stmt.i_groupings[name]
if (mod is not None and
mod != g.i_orig_module and
g.i_orig_module.keyword == ):
if mod.search_one(, g.i_orig_module.arg) is None:
return None
return g
stmt = stmt.parent
return None | Search for a grouping in scope
First search the hierarchy, then the module and its submodules. |
4,307 | def parse_file_name_starting_position(self):
groups = mod_re.findall(, self.file_name)
assert groups and len(groups) == 1 and len(groups[0]) == 4, .format(self.file_name)
groups = groups[0]
if groups[0] == :
latitude = float(groups[1])
else:
latitude = - float(groups[1])
if groups[2] == :
longitude = float(groups[3])
else:
longitude = - float(groups[3])
self.latitude = latitude
self.longitude = longitude | Returns (latitude, longitude) of lower left point of the file |
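The regular expression in the row above was lost in extraction. A standalone sketch of the same idea, assuming SRTM-style file names such as `N44E018.hgt` and a hypothetical pattern (not necessarily the one the original used):

```python
import re

def parse_srtm_position(file_name):
    # hypothetical pattern: hemisphere letter plus degrees, for latitude then longitude
    groups = re.findall(r'([NS])(\d+)([EW])(\d+)', file_name)
    assert groups and len(groups) == 1 and len(groups[0]) == 4, \
        'Invalid file name {0}'.format(file_name)
    ns, lat, ew, lon = groups[0]
    latitude = float(lat) if ns == 'N' else -float(lat)
    longitude = float(lon) if ew == 'E' else -float(lon)
    return latitude, longitude

print(parse_srtm_position('N44E018.hgt'))  # (44.0, 18.0)
print(parse_srtm_position('S33W070.hgt'))  # (-33.0, -70.0)
```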
4,308 | def connect(servers=None, framed_transport=False, timeout=None,
retry_time=60, recycle=None, round_robin=None, max_retries=3):
if servers is None:
servers = [DEFAULT_SERVER]
return ThreadLocalConnection(servers, framed_transport, timeout,
retry_time, recycle, max_retries=max_retries) | Constructs a single ElasticSearch connection. Connects to a randomly chosen
server on the list.
If the connection fails, it will attempt to connect to each server on the
list in turn until one succeeds. If it is unable to find an active server,
it will throw a NoServerAvailable exception.
Failing servers are kept on a separate list and eventually retried, no
sooner than `retry_time` seconds after failure.
:keyword servers: [server]
List of ES servers with format: "hostname:port"
Default: [("127.0.0.1",9500)]
:keyword framed_transport: If True, use a TFramedTransport instead of a TBufferedTransport
:keyword timeout: Timeout in seconds (e.g. 0.5)
Default: None (it will stall forever)
:keyword retry_time: Minimum time in seconds until a failed server is reinstated. (e.g. 0.5)
Default: 60
:keyword recycle: Max time in seconds before an open connection is closed and returned to the pool.
Default: None (Never recycle)
:keyword max_retries: Maximum number of retries when a connection is down
:keyword round_robin: *DEPRECATED*
:return: ES client |
4,309 | def assert_credentials_match(self, verifier, authc_token, account):
cred_type = authc_token.token_info[]
try:
verifier.verify_credentials(authc_token, account[])
except IncorrectCredentialsException:
updated_account = self.update_failed_attempt(authc_token, account)
failed_attempts = updated_account[][cred_type].\
get(, [])
raise IncorrectCredentialsException(failed_attempts)
except ConsumedTOTPToken:
account[][cred_type][] = authc_token.credentials
self.cache_handler.set(domain= + self.name,
identifier=authc_token.identifier,
value=account) | :type verifier: authc_abcs.CredentialsVerifier
:type authc_token: authc_abcs.AuthenticationToken
:type account: account_abcs.Account
:returns: account_abcs.Account
:raises IncorrectCredentialsException: when authentication fails,
including unix epoch timestamps
of recently failed attempts |
4,310 | def console_exec(thread_id, frame_id, expression, dbg):
frame = dbg.find_frame(thread_id, frame_id)
is_multiline = expression.count() > 1
expression = str(expression.replace(, ))
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals)
if IPYTHON:
need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg)
if not need_more:
pydevd_save_locals.save_locals(frame)
return need_more
interpreter = ConsoleWriter()
if not is_multiline:
try:
code = compile_command(expression)
except (OverflowError, SyntaxError, ValueError):
interpreter.showsyntaxerror()
return False
if code is None:
return True
else:
code = expression
try:
Exec(code, updated_globals, frame.f_locals)
except SystemExit:
raise
except:
interpreter.showtraceback()
else:
pydevd_save_locals.save_locals(frame)
return False | returns 'False' in case expression is partially correct |
4,311 | def item_properties(self, handle):
logger.debug("Getting properties for handle: {}".format(handle))
properties = {
: self.get_size_in_bytes(handle),
: self.get_utc_timestamp(handle),
: self.get_hash(handle),
: self.get_relpath(handle)
}
logger.debug("{} properties: {}".format(handle, properties))
return properties | Return properties of the item with the given handle. |
4,312 | def _set_show_system_info(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_system_info.show_system_info, is_leaf=True, yang_name="show-system-info", rest_name="show-system-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "rpc",
: ,
})
self.__show_system_info = t
if hasattr(self, ):
self._set() | Setter method for show_system_info, mapped from YANG variable /brocade_ras_ext_rpc/show_system_info (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_system_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_system_info() directly.
YANG Description: Shows the system information MAC etc. |
4,313 | def linspace(self, start, stop, n):
if n == 1: return [start]
L = [0.0] * n
nm1 = n - 1
nm1inv = 1.0 / nm1
for i in range(n):
L[i] = nm1inv * (start*(nm1 - i) + stop*i)
return L | Simple replacement for numpy linspace |
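A quick check of the interpolation formula above, rewritten as a standalone function (the original is a method, so `self` is dropped):

```python
def linspace(start, stop, n):
    if n == 1:
        return [start]
    nm1inv = 1.0 / (n - 1)
    return [nm1inv * (start * (n - 1 - i) + stop * i) for i in range(n)]

print(linspace(0.0, 1.0, 5))  # [0.0, 0.25, 0.5, 0.75, 1.0]
print(linspace(2.0, 2.0, 1))  # [2.0]
```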
4,314 | def report(self):
data =
for sample in self.metadata:
if sample[self.analysistype].primers != :
sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir,
.format(sample.name, self.analysistype))
strainspecific = .format(.join(sorted(sample[self.analysistype].targets)),
sample.name)
for gene in sorted(sample[self.analysistype].targets):
try:
percentidentity = sample[self.analysistype].blastresults[gene][]
if percentidentity > 50:
strainspecific += .format(percentidentity)
else:
strainspecific +=
except KeyError:
strainspecific +=
strainspecific +=
with open(sample[self.analysistype].report, ) as specificreport:
specificreport.write(strainspecific)
data += strainspecific
with open(os.path.join(self.reportdir, .format(self.analysistype)), ) as report:
report.write(data) | Create reports of the findings |
4,315 | def distribution_compatible(dist, supported_tags=None):
if supported_tags is None:
supported_tags = get_supported()
package = Package.from_href(dist.location)
if not package:
return False
return package.compatible(supported_tags) | Is this distribution compatible with the given interpreter/platform combination?
:param supported_tags: A list of tag tuples specifying which tags are supported
by the platform in question.
:returns: True if the distribution is compatible, False if it is unrecognized or incompatible. |
4,316 | def add_empty_fields(untl_dict):
for element in UNTL_XML_ORDER:
if element not in untl_dict:
try:
py_object = PYUNTL_DISPATCH[element](
content=,
qualifier=,
)
except:
try:
py_object = PYUNTL_DISPATCH[element](content=)
except:
try:
py_object = PYUNTL_DISPATCH[element]()
except:
raise PyuntlException(
)
else:
untl_dict[element] = [{: {}}]
else:
if not py_object.contained_children:
untl_dict[element] = [{: }]
else:
untl_dict[element] = [{: {}}]
else:
if not py_object.contained_children:
untl_dict[element] = [{: , : }]
else:
untl_dict[element] = [{: {}, : }]
for child in py_object.contained_children:
untl_dict[element][0].setdefault(, {})
untl_dict[element][0][][child] =
return untl_dict | Add empty values if UNTL fields don't have values. |
4,317 | def parse_task_declaration(self, declaration_subAST):
var_name = self.parse_declaration_name(declaration_subAST.attr("name"))
var_type = self.parse_declaration_type(declaration_subAST.attr("type"))
var_expressn = self.parse_declaration_expressn(declaration_subAST.attr("expression"), es=)
return (var_name, var_type, var_expressn) | Parses the declaration section of the WDL task AST subtree.
Examples:
String my_name
String your_name
Int two_chains_i_mean_names = 0
:param declaration_subAST: Some subAST representing a task declaration
like: 'String file_name'
:return: var_name, var_type, var_value
Example:
Input subAST representing: 'String file_name'
Output: var_name='file_name', var_type='String', var_value=None |
4,318 | def indent(lines, amount, ch=' '):  # default pad character assumed to be a space
padding = amount * ch
return padding + ('\n' + padding).join(lines.split('\n')) | Indent the lines in a string by padding each one with the proper number of pad characters |
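A short usage check for the repaired `indent` helper above; the space default and newline splitting are the assumptions noted in its comments:

```python
text = "first line\nsecond line"
print(indent(text, 4))
#     first line
#     second line
print(indent(text, 2, ch='.'))
# ..first line
# ..second line
```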
4,319 | def _gassist_any(self,dg,dt,dt2,name,na=None,nodiag=False,memlimit=-1):
if self.lib is None:
raise ValueError("Not initialized.")
import numpy as np
from .auto import ftype_np,gtype_np
from .types import isint
if dg.dtype.char!=gtype_np:
raise ValueError(+dg.dtype.char++gtype_np)
if dt.dtype.char!=ftype_np or dt2.dtype.char!=ftype_np:
raise ValueError()
if len(dg.shape)!=2 or len(dt.shape)!=2 or len(dt2.shape)!=2:
raise ValueError()
if type(nodiag) is not bool:
raise ValueError()
if not isint(memlimit):
raise ValueError()
if not (na is None or isint(na)):
raise ValueError()
if na is not None and na<=0:
raise ValueError()
ng=dg.shape[0]
nt=dt2.shape[0]
ns=dg.shape[1]
nvx=na+1 if na else dg.max()+1
nd=1 if nodiag else 0
if nvx<2:
raise ValueError()
if dt.shape!=dg.shape or dt2.shape[1]!=ns:
raise ValueError()
if np.isnan(dt).sum()+np.isnan(dt2).sum()>0:
raise ValueError()
func=self.cfunc(name,rettype=,argtypes=[,,,,,,])
d=np.require(np.zeros((ng,nt),dtype=dt.dtype),requirements=[,,,])
dgr=np.require(dg,requirements=[,,,])
dtr=np.require(dt,requirements=[,,,])
dt2r=np.require(dt2,requirements=[,,,])
ret=func(dgr,dtr,dt2r,d,nvx,nd,memlimit)
ans={:ret,:d}
return ans | Calculates probability of gene i regulating gene j with genotype data assisted method,
with the recommended combination of multiple tests.
dg: numpy.ndarray(nt,ns,dtype=gtype(='u1' by default)) Genotype data.
Entry dg[i,j] is genotype i's value for sample j.
Each value must be among 0,1,...,na.
Genotype i must be best (and significant) eQTL of gene i (in dt).
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default)) Gene expression data for A
Entry dt[i,j] is gene i's expression level for sample j.
Genotype i (in dg) must be best (and significant) eQTL of gene i.
dt2:numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default)) Gene expression data for B.
dt2 has the same format as dt, and can be identical with, different from, or a superset of dt.
When dt2 is a superset of (or identical with) dt, dt2 must be arranged
to be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and
set parameter nodiag = 1.
name: actual C function name to call
na: Number of alleles the species have. It determines the maximum number of values each genotype can take. When unspecified, it is automatically
determined as the maximum of dg.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
This should be set to True when A is a subset of B and aligned correspondingly.
memlimit: The approximate memory usage limit in bytes for the library. For datasets that require more memory, calculation will be split into smaller chunks. If the memory limit is smaller than the minimum required, calculation can fail with an error message. memlimit=0 defaults to unlimited memory usage.
Return: dictionary with following keys:
ret:0 iff execution succeeded.
p: numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
Probability function for the recommended combination of multiple tests.
For more information on tests, see paper.
ftype and gtype can be found in auto.py. |
4,320 | def main(path_dir, requirements_name):
click.echo("\nWARNING: Uninstall libs it\nREMINDER: After uninstall libs, update your requirements file.\nUse the `pip freeze > requirements.txt` command.\n\nList of installed libs and your dependencies added on project\nrequirements that are not being used:\n')
check(path_dir, requirements_name) | Console script for imports. |
4,321 | def NotificationsPost(self, parameters):
if self.__SenseApiCall__(, , parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | Create a notification on CommonSense.
If successful the result, including the notification_id, can be obtained from getResponse(), and should be a json string.
@param parameters (dictionary) - Dictionary containing the notification to create.
@note -
@return (bool) - Boolean indicating whether NotificationsPost was successful. |
4,322 | def add_job(self, job):
self.cur.execute("INSERT INTO jobs VALUES(?,?,?,?,?)", (
job["id"], job["description"], job["last-run"], job["next-run"], job["last-run-result"]))
return True | Adds a new job into the cache.
:param dict job: The job dictionary
:returns: True |
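A self-contained sketch of the insert in the row above, assuming a plain `sqlite3` connection; the table layout is an assumption, only the job dictionary keys come from the code:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
# assumed schema: five columns matching the five placeholders above
cur.execute("CREATE TABLE jobs (id TEXT, description TEXT, "
            "last_run TEXT, next_run TEXT, last_run_result INTEGER)")

job = {
    "id": "backup-daily",
    "description": "Nightly backup",
    "last-run": "2019-01-01T02:00:00",
    "next-run": "2019-01-02T02:00:00",
    "last-run-result": 0,
}
cur.execute("INSERT INTO jobs VALUES(?,?,?,?,?)", (
    job["id"], job["description"], job["last-run"],
    job["next-run"], job["last-run-result"]))
conn.commit()
```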
4,323 | def _geolocation_extract(response):
body = response.json()
if response.status_code in (200, 404):
return body
try:
error = body["error"]["errors"][0]["reason"]
except KeyError:
error = None
if response.status_code == 403:
raise exceptions._OverQueryLimit(response.status_code, error)
else:
raise exceptions.ApiError(response.status_code, error) | Mimics the exception handling logic in ``client._get_body``, but
for geolocation which uses a different response format. |
4,324 | def get_ips_by_equipment_and_environment(self, equip_nome, id_ambiente):
if id_ambiente is None:
raise InvalidParameterError(
u)
url = + str(equip_nome) + + str(id_ambiente)
code, xml = self.submit(None, , url)
return self.response(code, xml) | Search Group Equipment by the identifier.
:param id_egroup: Identifier of the Group Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'equipaments':
[{'nome': < name_equipament >, 'grupos': < id_group >,
'mark': {'id': < id_mark >, 'nome': < name_mark >},'modelo': < id_model >,
'tipo_equipamento': < id_type >,
'model': {'nome': , 'id': < id_model >, 'marca': < id_mark >},
'type': {id': < id_type >, 'tipo_equipamento': < name_type >},
'id': < id_equipment >}, ... ]}
:raise InvalidParameterError: Group Equipment is null and invalid.
:raise GrupoEquipamentoNaoExisteError: Group Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
4,325 | def dict_to_numpy_array(d):
return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()])) | Convert a dict of 1d arrays to a numpy recarray |
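A usage sketch for the one-liner above, written with the `np.rec` alias so the import is explicit (the original presumably imports `fromarrays` from `numpy.core.records`):

```python
import numpy as np

def dict_to_numpy_array(d):
    # same idea as the row above, using np.rec.fromarrays
    return np.rec.fromarrays(list(d.values()),
                             np.dtype([(str(k), v.dtype) for k, v in d.items()]))

d = {"x": np.array([1, 2, 3]), "y": np.array([0.5, 1.5, 2.5])}
rec = dict_to_numpy_array(d)
print(rec["x"])  # [1 2 3]
print(rec["y"])  # [0.5 1.5 2.5]
```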
4,326 | def _set_backreferences(self, context, items, **kwargs):
if initializing:
return
uid = api.get_uid(context)
raw = self.getRaw(context) or []
if isinstance(raw, basestring):
raw = [raw, ]
cur = set(raw)
new = set(map(api.get_uid, items))
removed = cur.difference(new)
for uid in removed:
source = api.get_object_by_uid(uid, None)
if source is None:
logger.warn("UID {} does not exist anymore".format(uid))
continue
self.unlink_reference(source, context)
for item in items:
self.link_reference(item, context) | Set the back references on the linked items
This will set an annotation storage on the referenced items which points
to the current context. |
4,327 | def save(self_or_cls, obj, basename, fmt=, key={}, info={}, options=None, **kwargs):
if info or key:
raise Exception()
if isinstance(obj, (Plot, NdWidget)):
plot = obj
else:
with StoreOptions.options(obj, options, **kwargs):
plot = self_or_cls.get_plot(obj)
if (fmt in list(self_or_cls.widgets.keys())+[]) and len(plot) > 1:
with StoreOptions.options(obj, options, **kwargs):
if isinstance(basename, basestring):
basename = basename+
self_or_cls.export_widgets(plot, basename, fmt)
return
rendered = self_or_cls(plot, fmt)
if rendered is None: return
(data, info) = rendered
encoded = self_or_cls.encode(rendered)
prefix = self_or_cls._save_prefix(info[])
if prefix:
encoded = prefix + encoded
if isinstance(basename, (BytesIO, StringIO)):
basename.write(encoded)
basename.seek(0)
else:
filename = % (basename, info[])
with open(filename, ) as f:
f.write(encoded) | Save a HoloViews object to file, either using an explicitly
supplied format or to the appropriate default. |
4,328 | def validate_args(args):
if not os.path.isdir(args.directory):
print "Directory {} does not exist".format(args.directory)
sys.exit(5)
return args | Call all required validation functions
:param args:
:return: |
4,329 | def run(path, code=None, params=None, **meta):
if in params:
ignore_decorators = params[]
else:
ignore_decorators = None
check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path)
return [{
: e.line,
: (e.message[0:4] + e.message[5:]
if e.message[4] == else e.message),
: ,
: e.code
} for e in PyDocChecker().check_source(*check_source_args)] | pydocstyle code checking.
:return list: List of errors. |
4,330 | def _variant_po_to_dict(tokens) -> CentralDogma:
dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
if dsl is None:
raise ValueError(.format(tokens))
return dsl(
namespace=tokens[NAMESPACE],
name=tokens[NAME],
variants=[
_variant_to_dsl_helper(variant_tokens)
for variant_tokens in tokens[VARIANTS]
],
) | Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene).
:type tokens: ParseResult |
4,331 | def _filter_insane_successors(self, successors):
old_successors = successors[::]
successors = [ ]
for i, suc in enumerate(old_successors):
if suc.solver.symbolic(suc.ip):
# branch body lost in extraction; the surviving fragments "i + 1",
# "len(old_successors)" and "ip_int" were arguments to a logging call
continue
successors.append(suc)  # assumption: successors that pass the check are kept
return successors | Throw away all successors whose target doesn't make sense
This method is called after we resolve an indirect jump using an unreliable method (like, not through one of
the indirect jump resolvers, but through either pure concrete execution or backward slicing) to filter out the
obviously incorrect successors.
:param list successors: A collection of successors.
:return: A filtered list of successors
:rtype: list |
4,332 | def stop_process(self):
if self.process is not None:
self._user_stop = True
self.process.kill()
self.setReadOnly(True)
self._running = False | Stop the process (by killing it). |
4,333 | def csv_row_cleaner(rows):
result = []
for row in rows:
check_empty = len(exclude_empty_values(row)) > 1
check_set = len(set(exclude_empty_values(row))) > 1
check_last_allready = (result and result[-1] == row)
if check_empty and check_set and not check_last_allready:
result.append(row)
return result | Clean rows by checking that:
- the row is not empty.
- the row has >= 1 element different from the others.
- the row is not already the immediately preceding cleaned row. |
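A runnable sketch of the cleaning rules above; `exclude_empty_values` is not shown in the row, so a minimal stand-in that drops empty cells is assumed here:

```python
def exclude_empty_values(row):
    # stand-in for the helper used above: keep only non-empty cells
    return [cell for cell in row if cell not in (None, '', [], {})]

def csv_row_cleaner(rows):
    result = []
    for row in rows:
        check_empty = len(exclude_empty_values(row)) > 1
        check_set = len(set(exclude_empty_values(row))) > 1
        check_last_already = (result and result[-1] == row)
        if check_empty and check_set and not check_last_already:
            result.append(row)
    return result

rows = [
    ['a', 'b', 'c'],
    ['a', 'b', 'c'],      # same as the previous row: dropped
    ['', '', 'only'],     # a single non-empty cell: dropped
    ['x', 'x', 'x', ''],  # no two distinct values: dropped
    ['x', 'y', ''],
]
print(csv_row_cleaner(rows))  # [['a', 'b', 'c'], ['x', 'y', '']]
```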
4,334 | def _gather_from_files(self, config):
command_file = config.get_help_files()
cache_path = os.path.join(config.get_config_dir(), )
cols = _get_window_columns()
with open(os.path.join(cache_path, command_file), ) as help_file:
data = json.load(help_file)
self.add_exit()
commands = data.keys()
for command in commands:
branch = self.command_tree
for word in command.split():
if word not in self.completable:
self.completable.append(word)
if not branch.has_child(word):
branch.add_child(CommandBranch(word))
branch = branch.get_child(word)
description = data[command][]
self.descrip[command] = add_new_lines(description, line_min=int(cols) - 2 * TOLERANCE)
if in data[command]:
examples = []
for example in data[command][]:
examples.append([
add_new_lines(example[0], line_min=int(cols) - 2 * TOLERANCE),
add_new_lines(example[1], line_min=int(cols) - 2 * TOLERANCE)])
self.command_example[command] = examples
command_params = data[command].get(, {})
for param in command_params:
if not in command_params[param][]:
param_aliases = set()
for par in command_params[param][]:
param_aliases.add(par)
self.param_descript[command + " " + par] = \
add_new_lines(
command_params[param][] +
" " + command_params[param][],
line_min=int(cols) - 2 * TOLERANCE)
if par not in self.completable_param:
self.completable_param.append(par)
param_doubles = self.command_param_info.get(command, {})
for alias in param_aliases:
param_doubles[alias] = param_aliases
self.command_param_info[command] = param_doubles | Gathers data from the help files in a way that is convenient to use |
4,335 | def changeTo(self, path):
dictionary = DictSingle(Pair(, StringSingle(path)))
self.value = [dictionary] | Change the value.
Args:
path (str): the new environment path |
4,336 | def print_stack_trace(proc_obj, count=None, color=, opts={}):
"Print count entries of the stack trace"
if count is None:
n=len(proc_obj.stack)
else:
n=min(len(proc_obj.stack), count)
try:
for i in range(n):
print_stack_entry(proc_obj, i, color=color, opts=opts)
except KeyboardInterrupt:
pass
return | Print count entries of the stack trace |
4,337 | def open_project(self, path=None, restart_consoles=True,
save_previous_files=True):
self.switch_to_plugin()
if path is None:
basedir = get_home_dir()
path = getexistingdirectory(parent=self,
caption=_("Open project"),
basedir=basedir)
path = encoding.to_unicode_from_fs(path)
if not self.is_valid_project(path):
if path:
QMessageBox.critical(self, _(),
_("<b>%s</b> is not a Spyder project!") % path)
return
else:
path = encoding.to_unicode_from_fs(path)
self.add_to_recent(path)
if self.current_active_project is None:
if save_previous_files and self.main.editor is not None:
self.main.editor.save_open_files()
if self.main.editor is not None:
self.main.editor.set_option(,
getcwd_or_home())
if self.get_option():
self.show_explorer()
else:
if self.main.editor is not None:
self.set_project_filenames(
self.main.editor.get_open_filenames())
self.current_active_project = EmptyProject(path)
self.latest_project = EmptyProject(path)
self.set_option(, self.get_active_project_path())
self.setup_menu_actions()
self.sig_project_loaded.emit(path)
self.sig_pythonpath_changed.emit()
if restart_consoles:
self.restart_consoles() | Open the project located in `path` |
4,338 | def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
parse_record_fn, num_epochs=1, num_gpus=None,
examples_per_epoch=None, dtype=tf.float32):
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER)
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
dataset = dataset.repeat(num_epochs)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
lambda value: parse_record_fn(value, is_training, dtype),
batch_size=batch_size,
num_parallel_batches=1))
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
return dataset | Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
num_gpus: The number of gpus used for training.
examples_per_epoch: The number of examples in an epoch.
dtype: Data type to use for images/features.
Returns:
Dataset of (image, label) pairs ready for iteration. |
4,339 | def object_info(lcc_server, objectid, db_collection_id):
# (function body garbled in extraction; only string fragments survive:
# the '%s/api/object?%s' request URL, an 'Authorization: Bearer %s' header,
# and the failure message 'could not retrieve object info, URL used: %s, '
# 'error code: %s, reason: %s' % (url, e.code, e.reason))
return None | This gets information on a single object from the LCC-Server.
Returns a dict with all of the available information on an object, including
finding charts, comments, object type and variability tags, and
period-search results (if available).
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, objects that are visible to this
user will be returned, even if they are not visible to the public. Use this
to look up objects that have been marked as 'private' or 'shared'.
NOTE: you can pass the result dict returned by this function directly into
the `astrobase.checkplot.checkplot_pickle_to_png` function, e.g.::
astrobase.checkplot.checkplot_pickle_to_png(result_dict,
'object-%s-info.png' %
result_dict['objectid'])
to generate a quick PNG overview of the object information.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to.
objectid : str
This is the unique database ID of the object to retrieve info for. This
is always returned as the `db_oid` column in LCC-Server search results.
db_collection_id : str
This is the collection ID which will be searched for the object. This is
always returned as the `collection` column in LCC-Server search results.
Returns
-------
dict
A dict containing the object info is returned. Some important items in
the result dict:
- `objectinfo`: all object magnitude, color, GAIA cross-match, and
object type information available for this object
- `objectcomments`: comments on the object's variability if available
- `varinfo`: variability comments, variability features, type tags,
period and epoch information if available
- `neighbors`: information on the neighboring objects of this object in
its parent light curve collection
- `xmatch`: information on any cross-matches to external catalogs
(e.g. KIC, EPIC, TIC, APOGEE, etc.)
- `finderchart`: a base-64 encoded PNG image of the object's DSS2 RED
finder chart. To convert this to an actual PNG, try the function:
`astrobase.checkplot.pkl_io._b64_to_file`.
- `magseries`: a base-64 encoded PNG image of the object's light
curve. To convert this to an actual PNG, try the function:
`astrobase.checkplot.pkl_io._b64_to_file`.
- `pfmethods`: a list of period-finding methods applied to the object if
any. If this list is present, use the keys in it to get to the actual
period-finding results for each method. These will contain base-64
encoded PNGs of the periodogram and phased light curves using the best
three peaks in the periodogram, as well as period and epoch
information. |
4,340 | def get(url, params=None, **kwargs):
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs) | r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response |
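A typical call to the `get` helper above (standard `requests` usage; the URL is only an example):

```python
import requests

resp = requests.get("https://httpbin.org/get", params={"q": "example"})
print(resp.status_code)     # 200 on success
print(resp.url)             # https://httpbin.org/get?q=example
print(resp.json()["args"])  # {'q': 'example'}
```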
4,341 | def cfg_convert(self, value):
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError(
% (value, rest))
return d | Default converter for the cfg:// protocol. |
4,342 | def get(self, key):
data = r_kv.get(key)
return build_response(dict(data=data, code=200)) | Get a key-value from storage according to the key name. |
4,343 | def compounding(start, stop, compound):
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr *= compound | Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5 |
4,344 | def assert_equals(actual, expected, ignore_order=False, ignore_index=False, all_close=False):
equals_, reason = equals(actual, expected, ignore_order, ignore_index, all_close, _return_reason=True)
assert equals_, .format(reason, actual.to_string(), expected.to_string()) | Assert 2 series are equal.
Like ``assert equals(series1, series2, ...)``, but with better hints at
where the series differ. See `equals` for
detailed parameter doc.
Parameters
----------
actual : ~pandas.Series
expected : ~pandas.Series
ignore_order : bool
ignore_index : bool
all_close : bool |
4,345 | def collection_choices():
from invenio_collections.models import Collection
return [(0, _())] + [
(c.id, c.name) for c in Collection.query.all()
] | Return collection choices. |
4,346 | def complain(error):
if callable(error):
if DEVELOP:
raise error()
elif DEVELOP:
raise error
else:
logger.warn_err(error) | Raises in develop; warns in release. |
4,347 | def transcode_to_utf8(filename, encoding):
tmp = tempfile.TemporaryFile()
for line in io.open(filename, encoding=encoding):
tmp.write(line.strip().encode())
tmp.seek(0)
return tmp | Convert a file in some other encoding into a temporary file that's in
UTF-8. |
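A small usage sketch for the row above, assuming the function is defined with `io` and `tempfile` imported; note that `strip()` drops the newlines, so the temporary file holds the lines concatenated:

```python
import io

# write a small Latin-1 encoded file (hypothetical file name)
with io.open("latin1.txt", "w", encoding="latin-1") as f:
    f.write(u"caf\xe9\ncr\xe8me\n")

tmp = transcode_to_utf8("latin1.txt", "latin-1")
print(tmp.read().decode("utf-8"))  # 'cafécrème', stripped and re-encoded as UTF-8
```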
4,348 | def download(self):
self.housekeeping()
self.rippled_history()
if self.resampling_frequencies is not None:
self.find_markets()
self.resample_time_series() | Walk from the current ledger index to the genesis ledger index,
and download transactions from rippled. |
4,349 | def bg_compensate(img, sigma, splinepoints, scale):
from PIL import Image
import pylab
from matplotlib.image import pil_to_array
from centrosome.filter import canny
import matplotlib
img = Image.open(img)
if img.mode==:
imgdata = np.fromstring(img.tostring(),np.uint8)
imgdata.shape=(int(imgdata.shape[0]/2),2)
imgdata = imgdata.astype(np.uint16)
hi,lo = (0,1) if img.tag.prefix == else (1,0)
imgdata = imgdata[:,hi]*256 + imgdata[:,lo]
img_size = list(img.size)
img_size.reverse()
new_img = imgdata.reshape(img_size)
if 281 in img.tag:
img = new_img.astype(np.float32) / img.tag[281][0]
elif np.max(new_img) < 4096:
img = new_img.astype(np.float32) / 4095.
else:
img = new_img.astype(np.float32) / 65535.
else:
img = pil_to_array(img)
pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r)
pylab.show()
if len(img.shape)>2:
raise ValueError()
edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
ci = np.cumsum(edges, 0)
cj = np.cumsum(edges, 1)
i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
mask = ci > 0
mask = mask & (cj > 0)
mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]])
mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1])
import time
t0 = time.clock()
bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
print("Executed in %f sec" % (time.clock() - t0))
bg[~mask] = img[~mask]
pylab.subplot(1,3,2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r)
pylab.show() | Reads file, subtracts background. Returns [compensated image, background]. |
4,350 | def actually_possibly_award(self, **state):
user = state["user"]
force_timestamp = state.pop("force_timestamp", None)
awarded = self.award(**state)
if awarded is None:
return
if awarded.level is None:
assert len(self.levels) == 1
awarded.level = 1
awarded = awarded.level - 1
assert awarded < len(self.levels)
if (
not self.multiple and
BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
):
return
extra_kwargs = {}
if force_timestamp is not None:
extra_kwargs["awarded_at"] = force_timestamp
badge = BadgeAward.objects.create(
user=user,
slug=self.slug,
level=awarded,
**extra_kwargs
)
self.send_badge_messages(badge)
badge_awarded.send(sender=self, badge_award=badge) | Does the actual work of possibly awarding a badge. |
4,351 | def run_qsnp(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
if utils.file_exists(out_file):
return out_file
paired = get_paired_bams(align_bams, items)
if paired.normal_bam:
region_files = []
regions = _clean_regions(items, region)
if regions:
for region in regions:
out_region_file = out_file.replace(".vcf.gz", _to_str(region) + ".vcf.gz")
region_file = _run_qsnp_paired(align_bams, items, ref_file,
assoc_files, region, out_region_file)
region_files.append(region_file)
out_file = combine_variant_files(region_files, out_file, ref_file, items[0]["config"])
if not region:
out_file = _run_qsnp_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
return out_file
else:
raise ValueError("qSNP only works on paired samples") | Run qSNP calling on paired tumor/normal. |
4,352 | def fill_phenotype_calls(self,phenotypes=None,inplace=False):
if phenotypes is None: phenotypes = list(self[].unique())
def _get_calls(label,phenos):
d = dict([(x,0) for x in phenos])
if label!=label: return d
d[label] = 1
return d
if inplace:
self[] = self.apply(lambda x: _get_calls(x[],phenotypes),1)
return
fixed = self.copy()
fixed[] = fixed.apply(lambda x: _get_calls(x[],phenotypes),1)
return fixed | Set the phenotype_calls according to the phenotype names |
4,353 | def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
images = []
with open(path) as f:
topology = json.load(f)
if "topology" in topology:
if "nodes" in topology["topology"]:
for node in topology["topology"]["nodes"]:
compute_id = node.get(, )
if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"):
raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"]))
if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]:
raise aiohttp.web.HTTPConflict(text="Topology with a {} could not be exported".format(node["node_type"]))
if not keep_compute_id:
node["compute_id"] = "local"
if "properties" in node and node["node_type"] != "docker":
for prop, value in node["properties"].items():
if node["node_type"] == "iou":
if not prop == "path":
continue
elif not prop.endswith("image"):
continue
if value is None or value.strip() == :
continue
if not keep_compute_id:
node["properties"][prop] = os.path.basename(value)
if include_images is True:
images.append({
: compute_id,
: value,
: node[]
})
if not keep_compute_id:
topology["topology"]["computes"] = []
local_images = set([i[] for i in images if i[] == ])
for image in local_images:
_export_local_images(project, image, z)
remote_images = set([
(i[], i[], i[])
for i in images if i[] != ])
for compute_id, image_type, image in remote_images:
yield from _export_remote_images(project, compute_id, image_type, image, z, temporary_dir)
z.writestr("project.gns3", json.dumps(topology).encode())
return images | Take a project file (.gns3) and patch it for the export
We rename the .gns3 file to project.gns3 so the client does not have to guess the file name
:param path: Path of the .gns3 |
4,354 | def version(self, path, postmap=None, **params):
q = httpd.merge_query(path, postmap)
ans = {
: taskforce_version,
: .join(str(x) for x in sys.version_info[:3]),
}
ans[] = {
: platform.system(),
}
if self._httpd.allow_control:
ans[][] = platform.platform()
ans[][] = platform.release()
return self._format(ans, q) | Return the taskforce version.
Supports standard options. |
4,355 | def resolve(self, value=None):
if self.matcher:
self._init_matcher()
matcher = self.evaluate()
try:
value = self._transform(value)
self._assertion(matcher, value)
except AssertionError as ex:
raise ex
finally:
if self.deferred:
self.reset() | Resolve the current expression against the supplied value |
4,356 | def list_ip(self, instance_id):
output = self.client.describe_instances(InstanceIds=[instance_id])
output = output.get("Reservations")[0].get("Instances")[0]
ips = {}
ips[] = output.get("PrivateIpAddress")
ips[] = output.get("PublicIpAddress")
return ips | Return the instance's private and public IPs. |
4,357 | def create_role(self, **kwargs):
role = self.role_model(**kwargs)
return self.put(role) | Creates and returns a new role from the given parameters. |
4,358 | def reset_password(self, token):
expired, invalid, user = \
self.security_utils_service.reset_password_token_status(token)
if invalid:
self.flash(
_(),
category=)
return self.redirect()
elif expired:
self.security_service.send_reset_password_instructions(user)
self.flash(_(,
email=user.email,
within=app.config.SECURITY_RESET_PASSWORD_WITHIN),
category=)
return self.redirect()
spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT
if request.method == and spa_redirect:
return self.redirect(spa_redirect, token=token, _external=True)
form = self._get_form()
if form.validate_on_submit():
self.security_service.reset_password(user, form.password.data)
self.security_service.login_user(user)
self.after_this_request(self._commit)
self.flash(_(),
category=)
if request.is_json:
return self.jsonify({: user.get_auth_token(),
: user})
return self.redirect(,
)
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render(,
reset_password_form=form,
reset_password_token=token,
**self.security.run_ctx_processor()) | View function to verify a user's reset password token from the email we sent to them.
It also handles the form for them to set a new password.
Supports html and json requests. |
4,359 | def element_abund_marco(i_decay, stable_isotope_list,
stable_isotope_identifier,
mass_fractions_array_not_decayed,
mass_fractions_array_decayed):
global elem_abund
elem_abund = np.zeros(z_bismuth)
global elem_abund_decayed
elem_abund_decayed = np.zeros(z_bismuth)
global elem_prod_fac
elem_prod_fac = np.zeros(z_bismuth)
global elem_prod_fac_decayed
elem_prod_fac_decayed = np.zeros(z_bismuth)
for i in range(z_bismuth):
dummy = 0.
for j in range(len(spe)):
if znum_int[j] == i+1 and stable_isotope_identifier[j] > 0.5:
dummy = dummy + float(mass_fractions_array_not_decayed[j])
elem_abund[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac[i] = float(old_div(elem_abund[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac[i] = 0.
if i_decay == 2:
for i in range(z_bismuth):
dummy = 0.
for j in range(len(mass_fractions_array_decayed)):
if znum_int[cl[stable_isotope_list[j].capitalize()]] == i+1:
dummy = dummy + float(mass_fractions_array_decayed[j])
elem_abund_decayed[i] = dummy
for i in range(z_bismuth):
if index_stable[i] == 1:
elem_prod_fac_decayed[i] = float(old_div(elem_abund_decayed[i],solar_elem_abund[i]))
elif index_stable[i] == 0:
elem_prod_fac_decayed[i] = 0. | Given an array of isotopic abundances not decayed and a similar
array of isotopic abundances decayed, element abundances
and production factors for elements are calculated |
4,360 | def _grow(growth, walls, target, i, j, steps, new_steps, res):
growth[:] = 0
if target[i, j]:
res[0] = 1
res[1] = i
res[2] = j
return
step = 1
s0, s1 = growth.shape
step_len = 1
new_step_ind = 0
steps[new_step_ind, 0] = i
steps[new_step_ind, 1] = j
growth[i, j] = 1
while True:
for n in range(step_len):
i, j = steps[n]
for ii, jj in DIRECT_NEIGHBOURS:
pi = i + ii
pj = j + jj
if 0 <= pi < s0 and 0 <= pj < s1:
if growth[pi, pj] == 0 and not walls[pi, pj]:
growth[pi, pj] = step
if target[pi, pj]:
res[0] = 1
res[1] = pi
res[2] = pj
return
new_steps[new_step_ind, 0] = pi
new_steps[new_step_ind, 1] = pj
new_step_ind += 1
if new_step_ind == 0:
res[0] = 0
return
step += 1
steps, new_steps = new_steps, steps
step_len = new_step_ind
new_step_ind = 0 | fills [res] with [distance to next position where target == 1,
x coord.,
y coord. of that position in target]
using region growth
i,j -> pixel position
growth -> a work array, needed to measure the distance
steps, new_steps -> current and last positions of the region growth steps
using this instead of looking for the right step position in [growth]
should speed up the process |
4,361 | def lchisqprob(chisq,df):
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s | Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df) |
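As a cross-check, the value computed above is the chi-square survival function; a quick comparison with `scipy` (assuming it is installed):

```python
import math
from scipy import stats

chisq, df = 5.99, 2
# for df == 2 the code above reduces to exp(-chisq / 2)
print(math.exp(-chisq / 2.0))    # ~0.0500
print(stats.chi2.sf(chisq, df))  # ~0.0500, same value
```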
4,362 | def drop_constant_column_levels(df):
columns = df.columns
constant_levels = [i for i, level in enumerate(columns.levels) if len(level) <= 1]
constant_levels.reverse()
for i in constant_levels:
columns = columns.droplevel(i)
df.columns = columns | Drop the levels of a multi-level column DataFrame which are constant.
Operates in place. |
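A short demonstration of the in-place level drop above, assuming `pandas` is available and the function is importable:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(2, 3),
                  columns=pd.MultiIndex.from_product([["run1"], ["a", "b", "c"]]))
print(df.columns.nlevels)  # 2, and the outer level 'run1' is constant

drop_constant_column_levels(df)
print(list(df.columns))    # ['a', 'b', 'c'], only the varying level remains
print(df.columns.nlevels)  # 1
```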
4,363 | def bridge_to_vlan(br):
cmd = .format(br)
result = __salt__[](cmd)
if result[] != 0:
return False
return int(result[]) | Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_vlan br0 |
4,364 | def start_action(logger=None, action_type="", _serializers=None, **fields):
parent = current_action()
if parent is None:
return startTask(logger, action_type, _serializers, **fields)
else:
action = parent.child(logger, action_type, _serializers)
action._start(fields)
return action | Create a child L{Action}, figuring out the parent L{Action} from execution
context, and log the start message.
You can use the result as a Python context manager, or use the
L{Action.finish} API to explicitly finish it.
with start_action(logger, "yourapp:subsystem:dosomething",
entry=x) as action:
do(x)
result = something(x * 2)
action.addSuccessFields(result=result)
Or alternatively:
action = start_action(logger, "yourapp:subsystem:dosomething",
entry=x)
with action.context():
do(x)
result = something(x * 2)
action.addSuccessFields(result=result)
action.finish()
@param logger: The L{eliot.ILogger} to which to write messages, or
C{None} to use the default one.
@param action_type: The type of this action,
e.g. C{"yourapp:subsystem:dosomething"}.
@param _serializers: Either a L{eliot._validation._ActionSerializers}
instance or C{None}. In the latter case no validation or serialization
will be done for messages generated by the L{Action}.
@param fields: Additional fields to add to the start message.
@return: A new L{Action}. |
4,365 | async def _async_get_sshable_ips(self, ip_addresses):
async def _async_ping(ip_address):
try:
reader, writer = await asyncio.wait_for(
asyncio.open_connection(ip_address, 22), timeout=5)
except (OSError, TimeoutError):
return None
try:
line = await reader.readline()
finally:
writer.close()
if line.startswith(b):
return ip_address
ssh_ips = await asyncio.gather(*[
_async_ping(ip_address)
for ip_address in ip_addresses
])
return [
ip_address
for ip_address in ssh_ips
if ip_address is not None
] | Return list of all IP address that could be pinged. |
4,366 | def get_section_by_rva(self, rva):
for section in self.sections:
if section.contains_rva(rva):
return section
return None | Get the section containing the given address. |
4,367 | def _resolve_plt(self, addr, irsb, indir_jump):
if self.project.loader.all_elf_objects:
if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]):
return False
if not irsb.has_statements:
irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex
simsucc = self.project.engines.default_engine.process(self._initial_state, irsb, force_addr=addr)
if len(simsucc.successors) == 1:
ip = simsucc.successors[0].ip
if ip._model_concrete is not ip:
target_addr = ip._model_concrete.value
if (self.project.loader.find_object_containing(target_addr, membership_check=False) is not
self.project.loader.main_object) \
or self.project.is_hooked(target_addr):
indir_jump.resolved_targets.add(target_addr)
l.debug("Address %
return True
return False | Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
resolve the jump target.
:param int addr: Address of the block.
:param irsb: The basic block.
:param IndirectJump indir_jump: The IndirectJump instance.
:return: True if the IRSB represents a PLT stub and we successfully resolved the target.
False otherwise.
:rtype: bool |
4,368 | def set_orthogonal_selection(self, selection, value, fields=None):
if self._read_only:
err_read_only()
if not self._cache_metadata:
self._load_metadata_nosync()
indexer = OrthogonalIndexer(selection, self)
self._set_selection(indexer, value, fields=fields) | Modify data via a selection for each dimension of the array.
Parameters
----------
selection : tuple
A selection for each dimension of the array. May be any combination of int,
slice, integer array or Boolean array.
value : scalar or array-like
Value to be stored into the array.
fields : str or sequence of str, optional
For arrays with a structured dtype, one or more fields can be specified to set
data for.
Examples
--------
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.zeros((5, 5), dtype=int)
Set data for a selection of rows::
>>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)
>>> z[...]
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1]])
Set data for a selection of columns::
>>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 2, 1, 1, 2]])
Set data for a selection of rows and columns::
>>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 3, 1, 1, 3]])
For convenience, this functionality is also available via the `oindex` property.
E.g.::
>>> z.oindex[[1, 4], [1, 4]] = 4
>>> z[...]
array([[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4],
[0, 2, 0, 0, 2],
[0, 2, 0, 0, 2],
[1, 4, 1, 1, 4]])
Notes
-----
Orthogonal indexing is also known as outer indexing.
Slices with step > 1 are supported, but slices with negative step are not.
See Also
--------
get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,
get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,
vindex, oindex, __getitem__, __setitem__ |
4,369 | def ParseFileObject(self, parser_mediator, file_object):
volume = pyfsntfs.volume()
try:
volume.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
.format(exception))
try:
usn_change_journal = volume.get_usn_change_journal()
self._ParseUSNChangeJournal(parser_mediator, usn_change_journal)
finally:
volume.close() | Parses a NTFS $UsnJrnl metadata file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object. |
4,370 | def get_category(self, id, **data):
return self.get("/categories/{0}/".format(id), data=data) | GET /categories/:id/
Gets a :format:`category` by ID as ``category``. |
4,371 | def log_response(response: str, trim_log_values: bool = False, **kwargs: Any) -> None:
return log_(response, response_logger, logging.INFO, trim=trim_log_values, **kwargs) | Log a response |
4,372 | def on_reset_compat_defaults_clicked(self, bnt):
self.settings.general.reset()
self.settings.general.reset()
self.reload_erase_combos() | Reset default values to compat_{backspace,delete} dconf
keys. The default values are retrieved from the guake.schemas
file. |
4,373 | def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
name = referred_cls.__name__.lower() + "_ref"
return name | Overriding naming schemes. |
4,374 | def credit_note(request, note_id, access_code=None):
note_id = int(note_id)
current_note = CreditNoteController.for_id_or_404(note_id)
apply_form = forms.ApplyCreditNoteForm(
current_note.credit_note.invoice.user,
request.POST or None,
prefix="apply_note"
)
refund_form = forms.ManualCreditNoteRefundForm(
request.POST or None,
prefix="refund_note"
)
cancellation_fee_form = forms.CancellationFeeForm(
request.POST or None,
prefix="cancellation_fee"
)
if request.POST and apply_form.is_valid():
inv_id = apply_form.cleaned_data["invoice"]
invoice = commerce.Invoice.objects.get(pk=inv_id)
current_note.apply_to_invoice(invoice)
messages.success(
request,
"Applied credit note %d to invoice." % note_id,
)
return redirect("invoice", invoice.id)
elif request.POST and refund_form.is_valid():
refund_form.instance.entered_by = request.user
refund_form.instance.parent = current_note.credit_note
refund_form.save()
messages.success(
request,
"Applied manual refund to credit note."
)
refund_form = forms.ManualCreditNoteRefundForm(
prefix="refund_note",
)
elif request.POST and cancellation_fee_form.is_valid():
percentage = cancellation_fee_form.cleaned_data["percentage"]
invoice = current_note.cancellation_fee(percentage)
messages.success(
request,
"Generated cancellation fee for credit note %d." % note_id,
)
return redirect("invoice", invoice.invoice.id)
data = {
"credit_note": current_note.credit_note,
"apply_form": apply_form,
"refund_form": refund_form,
"cancellation_fee_form": cancellation_fee_form,
}
return render(request, "registrasion/credit_note.html", data) | Displays a credit note.
If ``request`` is a ``POST`` request, forms for applying or refunding
a credit note will be processed.
This view requires a login, and the logged in user must be staff.
Arguments:
note_id (castable to int): The ID of the credit note to view.
Returns:
render or redirect:
If the "apply to invoice" form is correctly processed, redirect to
that invoice, otherwise, render ``registration/credit_note.html``
with the following data::
{
"credit_note": models.commerce.CreditNote(),
"apply_form": form, # A form for applying credit note
# to an invoice.
"refund_form": form, # A form for applying a *manual*
# refund of the credit note.
"cancellation_fee_form" : form, # A form for generating an
# invoice with a
# cancellation fee
} |
4,375 | def n_members(self):
if self.is_finite:
return reduce(mul, [domain.n_members for domain in self._domains], 1)
else:
return np.inf | Returns the number of members in the domain if it
`is_finite`, otherwise, returns `np.inf`.
:type: ``int`` or ``np.inf`` |
4,376 | def inter_event_time_distribution(self, u=None, v=None):
dist = {}
if u is None:
first = True
delta = None
for ext in self.stream_interactions():
if first:
delta = ext
first = False
continue
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
elif u is not None and v is None:
delta = (0, 0, 0, 0)
flag = False
for ext in self.stream_interactions():
if ext[0] == u or ext[1] == u:
if flag:
disp = ext[-1] - delta[-1]
delta = ext
if disp in dist:
dist[disp] += 1
else:
dist[disp] = 1
else:
delta = ext
flag = True
else:
evt = self._adj[u][v]['t']
delta = []
for i in evt:
if i[0] != i[1]:
for j in [0, 1]:
delta.append(i[j])
else:
delta.append(i[0])
if len(delta) == 2 and delta[0] == delta[1]:
return {}
for i in range(0, len(delta) - 1):
e = delta[i + 1] - delta[i]
if e not in dist:
dist[e] = 1
else:
dist[e] += 1
return dist | Return the distribution of inter event time.
If u and v are None, the inter event time distribution of the whole dynamic graph is returned.
If only u is specified, the inter event time distribution of interactions involving u is returned.
If both u and v are specified, the inter event time distribution of (u, v) interactions is returned.
Parameters
----------
u : node id
v : node id
Returns
-------
nd : dictionary
A dictionary from inter event time to number of occurrences |
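A hedged usage sketch for the inter-event-time distribution above, assuming the dynetx package (which provides DynGraph and add_interaction); node ids and timestamps are illustrative.
    # Illustrative only; assumes dynetx is installed.
    import dynetx as dn

    g = dn.DynGraph()
    g.add_interaction(1, 2, t=0)
    g.add_interaction(1, 2, t=3)
    g.add_interaction(2, 3, t=5)
    g.add_interaction(1, 2, t=9)

    print(g.inter_event_time_distribution())      # gaps between successive events in the whole graph
    print(g.inter_event_time_distribution(1))     # gaps between events involving node 1
    print(g.inter_event_time_distribution(1, 2))  # gaps between (1, 2) interactions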
4,377 | def create(self, to, from_, method=values.unset, fallback_url=values.unset,
fallback_method=values.unset, status_callback=values.unset,
status_callback_event=values.unset,
status_callback_method=values.unset, send_digits=values.unset,
timeout=values.unset, record=values.unset,
recording_channels=values.unset,
recording_status_callback=values.unset,
recording_status_callback_method=values.unset,
sip_auth_username=values.unset, sip_auth_password=values.unset,
machine_detection=values.unset,
machine_detection_timeout=values.unset,
recording_status_callback_event=values.unset, trim=values.unset,
caller_id=values.unset,
machine_detection_speech_threshold=values.unset,
machine_detection_speech_end_threshold=values.unset,
machine_detection_silence_timeout=values.unset, url=values.unset,
application_sid=values.unset):
data = values.of({
'To': to,
'From': from_,
'Url': url,
'ApplicationSid': application_sid,
'Method': method,
'FallbackUrl': fallback_url,
'FallbackMethod': fallback_method,
'StatusCallback': status_callback,
'StatusCallbackEvent': serialize.map(status_callback_event, lambda e: e),
'StatusCallbackMethod': status_callback_method,
'SendDigits': send_digits,
'Timeout': timeout,
'Record': record,
'RecordingChannels': recording_channels,
'RecordingStatusCallback': recording_status_callback,
'RecordingStatusCallbackMethod': recording_status_callback_method,
'SipAuthUsername': sip_auth_username,
'SipAuthPassword': sip_auth_password,
'MachineDetection': machine_detection,
'MachineDetectionTimeout': machine_detection_timeout,
'RecordingStatusCallbackEvent': serialize.map(recording_status_callback_event, lambda e: e),
'Trim': trim,
'CallerId': caller_id,
'MachineDetectionSpeechThreshold': machine_detection_speech_threshold,
'MachineDetectionSpeechEndThreshold': machine_detection_speech_end_threshold,
'MachineDetectionSilenceTimeout': machine_detection_silence_timeout,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CallInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Create a new CallInstance
:param unicode to: Phone number, SIP address, or client identifier to call
:param unicode from_: Twilio number from which to originate the call
:param unicode method: HTTP method to use to fetch TwiML
:param unicode fallback_url: Fallback URL in case of error
:param unicode fallback_method: HTTP Method to use with fallback_url
:param unicode status_callback: The URL we should call to send status information to your application
:param unicode status_callback_event: The call progress events that we send to the `status_callback` URL.
:param unicode status_callback_method: HTTP Method to use with status_callback
:param unicode send_digits: The digits to dial after connecting to the number
:param unicode timeout: Number of seconds to wait for an answer
:param bool record: Whether or not to record the call
:param unicode recording_channels: The number of channels in the final recording
:param unicode recording_status_callback: The URL that we call when the recording is available to be accessed
:param unicode recording_status_callback_method: The HTTP method we should use when calling the `recording_status_callback` URL
:param unicode sip_auth_username: The username used to authenticate the caller making a SIP call
:param unicode sip_auth_password: The password required to authenticate the user account specified in `sip_auth_username`.
:param unicode machine_detection: Enable machine detection or end of greeting detection
:param unicode machine_detection_timeout: Number of seconds to wait for machine detection
:param unicode recording_status_callback_event: The recording status events that will trigger calls to the URL specified in `recording_status_callback`
:param unicode trim: Set this parameter to control trimming of silence on the recording.
:param unicode caller_id: The phone number, SIP address, or Client identifier that made this call. Phone numbers are in E.164 format (e.g., +16175551212). SIP addresses are formatted as `[email protected]`.
:param unicode machine_detection_speech_threshold: Number of milliseconds for measuring stick for the length of the speech activity
:param unicode machine_detection_speech_end_threshold: Number of milliseconds of silence after speech activity
:param unicode machine_detection_silence_timeout: Number of milliseconds of initial silence
:param unicode url: The absolute URL that returns TwiML for this call
:param unicode application_sid: The SID of the Application resource that will handle the call
:returns: Newly created CallInstance
:rtype: twilio.rest.api.v2010.account.call.CallInstance |
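A hedged usage sketch for the call-creation endpoint above via the public twilio-python client; the account SID, auth token, and phone numbers are placeholders.
    # Placeholders only -- substitute real credentials and numbers.
    from twilio.rest import Client

    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
    call = client.calls.create(
        to="+15558675310",
        from_="+15017122661",
        url="http://demo.twilio.com/docs/voice.xml",  # TwiML that drives the call
        record=True,
    )
    print(call.sid)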
4,378 | def dataset_path(cache=None, cachefile="~/.io3d_cache.yaml", get_root=False):
local_data_dir = local_dir
if cachefile is not None:
cache = cachef.CacheFile(cachefile)
if cache is not None:
local_data_dir = cache.get_or_save_default("local_dataset_dir", local_dir)
if get_root:
local_data_dir
else:
logger.warning("Parameter")
local_data_dir = op.join(local_data_dir, "medical", "orig")
return op.expanduser(local_data_dir) | Get dataset path.
:param cache: CacheFile object
:param cachefile: cachefile path, default '~/.io3d_cache.yaml'
:param get_root: if True, return the dataset root directory instead of its "medical/orig" subdirectory
:return: path to dataset |
4,379 | def is_dsub_operation(cls, op):
if not cls.is_pipelines_operation(op):
return False
for name in ['job-id', 'job-name', 'user-id']:
if not cls.get_operation_label(op, name):
return False
return True | Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed
- envs: _SCRIPT has always existed.
In order to keep a simple heuristic this test only uses labels.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub. |
4,380 | def run_nested(self, nlive_init=500, maxiter_init=None,
maxcall_init=None, dlogz_init=0.01, logl_max_init=np.inf,
nlive_batch=500, wt_function=None, wt_kwargs=None,
maxiter_batch=None, maxcall_batch=None,
maxiter=None, maxcall=None, maxbatch=None,
stop_function=None, stop_kwargs=None, use_stop=True,
save_bounds=True, print_progress=True, print_func=None,
live_points=None):
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
if maxiter_batch is None:
maxiter_batch = sys.maxsize
if maxcall_batch is None:
maxcall_batch = sys.maxsize
if maxbatch is None:
maxbatch = sys.maxsize
if maxiter_init is None:
maxiter_init = sys.maxsize
if maxcall_init is None:
maxcall_init = sys.maxsize
if wt_function is None:
wt_function = weight_function
if wt_kwargs is None:
wt_kwargs = dict()
if stop_function is None:
stop_function = stopping_function
if stop_kwargs is None:
stop_kwargs = dict()
if print_func is None:
print_func = print_fn
ncall = self.ncall
niter = self.it - 1
logl_bounds = (-np.inf, np.inf)
maxcall_init = min(maxcall_init, maxcall)
maxiter_init = min(maxiter_init, maxiter)
if not self.base:
for results in self.sample_initial(nlive=nlive_init,
dlogz=dlogz_init,
maxcall=maxcall_init,
maxiter=maxiter_init,
logl_max=logl_max_init,
live_points=live_points):
(worst, ustar, vstar, loglstar, logvol,
logwt, logz, logzvar, h, nc, worst_it,
boundidx, bounditer, eff, delta_logz) = results
ncall += nc
niter += 1
if print_progress:
print_func(results, niter, ncall, nbatch=0,
dlogz=dlogz_init, logl_max=logl_max_init)
for n in range(self.batch, maxbatch):
res = self.results
mcall = min(maxcall - ncall, maxcall_batch)
miter = min(maxiter - niter, maxiter_batch)
if mcall > 0 and miter > 0 and use_stop:
if self.use_pool_stopfn:
M = self.M
else:
M = map
stop, stop_vals = stop_function(res, stop_kwargs,
rstate=self.rstate, M=M,
return_vals=True)
stop_post, stop_evid, stop_val = stop_vals
else:
stop = False
stop_val = np.NaN
if mcall > 0 and miter > 0 and not stop:
passback = self.add_batch(nlive=nlive_batch,
wt_function=wt_function,
wt_kwargs=wt_kwargs,
maxiter=miter,
maxcall=mcall,
save_bounds=save_bounds,
print_progress=print_progress,
print_func=print_func,
stop_val=stop_val)
ncall, niter, logl_bounds, results = passback
elif logl_bounds[1] != np.inf:
break
if print_progress:
sys.stderr.write("\n") | **The main dynamic nested sampling loop.** After an initial "baseline"
run using a constant number of live points, dynamically allocates
additional (nested) samples to optimize a specified weight function
until a specified stopping criterion is reached.
Parameters
----------
nlive_init : int, optional
The number of live points used during the initial ("baseline")
nested sampling run. Default is `500`.
maxiter_init : int, optional
Maximum number of iterations for the initial baseline nested
sampling run. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall_init : int, optional
Maximum number of likelihood evaluations for the initial
baseline nested sampling run. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
dlogz_init : float, optional
The baseline run will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. The default is
`0.01`.
logl_max_init : float, optional
The baseline run will stop when the sampled ln(likelihood) exceeds
this threshold. Default is no bound (`np.inf`).
nlive_batch : int, optional
The number of live points used when adding additional samples
from a nested sampling run within each batch. Default is `500`.
wt_function : func, optional
A cost function that takes a :class:`Results` instance
and returns a log-likelihood range over which a new batch of
samples should be generated. The default function simply
computes a weighted average of the posterior and evidence
information content as::
weight = pfrac * pweight + (1. - pfrac) * zweight
wt_kwargs : dict, optional
Extra arguments to be passed to the weight function.
maxiter_batch : int, optional
Maximum number of iterations for the nested
sampling run within each batch. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall_batch : int, optional
Maximum number of likelihood evaluations for the nested
sampling run within each batch. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
maxiter : int, optional
Maximum number of iterations allowed. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations allowed.
Default is `sys.maxsize` (no limit).
maxbatch : int, optional
Maximum number of batches allowed. Default is `sys.maxsize`
(no limit).
stop_function : func, optional
A function that takes a :class:`Results` instance and
returns a boolean indicating that we should terminate the run
because we've collected enough samples.
stop_kwargs : float, optional
Extra arguments to be passed to the stopping function.
use_stop : bool, optional
Whether to evaluate our stopping function after each batch.
Disabling this can improve performance if other stopping criteria
such as :data:`maxcall` are already specified. Default is `True`.
save_bounds : bool, optional
Whether or not to save distributions used to bound
the live points internally during dynamic live point allocation.
Default is `True`.
print_progress : bool, optional
Whether to output a simple summary of the current run that
updates each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
A set of live points used to initialize the nested sampling run.
Contains `live_u`, the coordinates on the unit cube, `live_v`, the
transformed variables, and `live_logl`, the associated
loglikelihoods. By default, if these are not provided the initial
set of live points will be drawn from the unit `npdim`-cube.
**WARNING: It is crucial that the initial set of live points have
been sampled from the prior. Failure to provide a set of valid
live points will result in biased results.** |
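A small sketch of driving the dynamic sampler above, assuming the dynesty package; the toy Gaussian likelihood and flat prior are illustrative.
    # Toy 3-D Gaussian likelihood with a flat prior on [-10, 10]^3 (illustrative).
    import numpy as np
    import dynesty

    ndim = 3

    def loglike(x):
        return -0.5 * np.sum(x**2)

    def prior_transform(u):
        return 20.0 * u - 10.0  # map the unit cube to [-10, 10]

    dsampler = dynesty.DynamicNestedSampler(loglike, prior_transform, ndim)
    dsampler.run_nested(nlive_init=500, nlive_batch=500, maxbatch=10)
    results = dsampler.results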
4,381 | def get_objective_bank_admin_session(self, proxy, *args, **kwargs):
if not self.supports_objective_bank_admin():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveBankAdminSession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the OsidSession associated with the objective bank administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveBankAdminSession``
:rtype: ``osid.learning.ObjectiveBankAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_bank_admin() is false``
*compliance: optional -- This method must be implemented if ``supports_objective_bank_admin()`` is true.* |
4,382 | def _find_max_lag(x, rho_limit=0.05, maxmaxlag=20000, verbose=0):
acv = autocov(x)
rho = acv[0, 1] / acv[0, 0]
lam = -1. / np.log(abs(rho))
maxlag = int(np.floor(3. * lam)) + 1
jump = int(np.ceil(0.01 * lam)) + 1
T = len(x)
while ((abs(rho) > rho_limit) & (maxlag < min(T / 2, maxmaxlag))):
acv = autocov(x, maxlag)
rho = acv[0, 1] / acv[0, 0]
maxlag += jump
maxlag = int(np.floor(1.3 * maxlag))
if maxlag >= min(T / 2, maxmaxlag):
maxlag = min(min(T / 2, maxlag), maxmaxlag)
"maxlag fixed to %d" % maxlag
return maxlag
if maxlag <= 1:
print_("maxlag = %d, fixing value to 10" % maxlag)
return 10
if verbose:
print_("maxlag = %d" % maxlag)
return maxlag | Automatically find an appropriate maximum lag to calculate IAT |
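The heuristic above derives a decay time from the lag-1 autocorrelation; a self-contained sketch of the same idea using only numpy (the autocov helper is reimplemented here because the original is not shown).
    import numpy as np

    def autocov(x, lag=1):
        # 2x2 covariance matrix between the series and a lagged copy of itself.
        return np.cov(x[:-lag], x[lag:], bias=True)

    rng = np.random.default_rng(0)
    x = np.zeros(5000)
    for t in range(1, len(x)):
        x[t] = 0.9 * x[t - 1] + rng.normal()  # AR(1) toy series

    acv = autocov(x)
    rho = acv[0, 1] / acv[0, 0]
    lam = -1.0 / np.log(abs(rho))             # implied exponential decay time
    maxlag = int(np.floor(3.0 * lam)) + 1
    print(rho, maxlag)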
4,383 | def run_ipython_notebook(notebook_str):
from runipy.notebook_runner import NotebookRunner
import nbformat
import logging
log_format = '%(asctime)s %(levelname)s: %(message)s'
log_datefmt = '%m/%d/%Y %I:%M:%S %p'
logging.basicConfig(
level=logging.INFO, format=log_format, datefmt=log_datefmt
)
print()
nb4 = nbformat.reads(notebook_str, 4)
runner = NotebookRunner(nb4)
runner.run_notebook(skip_exceptions=False)
run_nb = runner.nb
return run_nb | References:
https://github.com/paulgb/runipy
>>> from utool.util_ipynb import * # NOQA |
4,384 | def _calc(self, y, w):
if self.discrete:
self.lclass_ids = weights.lag_categorical(w, self.class_ids,
ties="tryself")
else:
ly = weights.lag_spatial(w, y)
self.lclass_ids, self.lag_cutoffs, self.m = self._maybe_classify(
ly, self.m, self.lag_cutoffs)
self.lclasses = np.arange(self.m)
T = np.zeros((self.m, self.k, self.k))
n, t = y.shape
for t1 in range(t - 1):
t2 = t1 + 1
for i in range(n):
T[self.lclass_ids[i, t1], self.class_ids[i, t1],
self.class_ids[i, t2]] += 1
P = np.zeros_like(T)
for i, mat in enumerate(T):
row_sum = mat.sum(axis=1)
row_sum = row_sum + (row_sum == 0)
p_i = np.matrix(np.diag(1. / row_sum) * np.matrix(mat))
P[i] = p_i
return T, P | Helper to estimate spatial lag conditioned Markov transition
probability matrices based on maximum likelihood techniques. |
4,385 | def Ctrl_C(self, delay=0):
self._delay(delay)
self.add(Command("KeyDown", % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", % (BoardKey.C, 1)))
self.add(Command("KeyUp", % (BoardKey.Ctrl, 1))) | Ctrl + C shortcut. |
4,386 | def move_dirty_lock_file(dirty_lock_file, sm_path):
if dirty_lock_file is not None \
and not dirty_lock_file == os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1]):
logger.debug("Move dirty lock from root tmp folder {0} to state machine folder {1}"
"".format(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])))
os.rename(dirty_lock_file, os.path.join(sm_path, dirty_lock_file.split(os.sep)[-1])) | Move the dirty lock file into sm_path so that it is no longer picked up by the automatic backup recovery |
4,387 | def marv(ctx, config, loglevel, logfilter, verbosity):
if config is None:
cwd = os.path.abspath(os.path.curdir)
while cwd != os.path.sep:
config = os.path.join(cwd, )
if os.path.exists(config):
break
cwd = os.path.dirname(cwd)
else:
config =
if not os.path.exists(config):
config = None
ctx.obj = config
setup_logging(loglevel, verbosity, logfilter) | Manage a Marv site |
4,388 | def network_info(host=None,
admin_username=None,
admin_password=None,
module=None):
inv = inventory(host=host, admin_username=admin_username,
admin_password=admin_password)
if inv is None:
cmd = {}
cmd[] = -1
cmd[] =
return cmd
if module not in inv.get() and module not in inv.get():
cmd = {}
cmd[] = -1
cmd[] = .format(module)
return cmd
cmd = __execute_ret(, host=host,
admin_username=admin_username,
admin_password=admin_password,
module=module)
if cmd[] != 0:
log.warning(, cmd[])
cmd[] = + + module + + \
cmd[]
return __parse_drac(cmd[]) | Return Network Configuration
CLI Example:
.. code-block:: bash
salt dell dracr.network_info |
4,389 | def guess_mime_type(url):
(mimetype, _mimeencoding) = mimetypes.guess_type(url)
if not mimetype:
ext = os.path.splitext(url)[1]
mimetype = _MIME_TYPES.get(ext)
_logger.debug("mimetype({}): {}".format(url, mimetype))
if not mimetype:
mimetype = "application/octet-stream"
return mimetype | Use the mimetypes module to look up the type for an extension.
This function also adds some extensions required for HTML5 |
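A quick sketch of the same fallback logic using only the standard library; the small extension table stands in for the module-level _MIME_TYPES referenced above (its real contents are not shown).
    import mimetypes
    import os

    # Hypothetical stand-in for the _MIME_TYPES table used above.
    _MIME_TYPES = {".webm": "video/webm", ".ogv": "video/ogg", ".m4v": "video/m4v"}

    def guess_mime_type(url):
        mimetype, _ = mimetypes.guess_type(url)
        if not mimetype:
            mimetype = _MIME_TYPES.get(os.path.splitext(url)[1])
        return mimetype or "application/octet-stream"

    print(guess_mime_type("clip.ogv"))     # video/ogg
    print(guess_mime_type("unknown.xyz"))  # application/octet-stream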
4,390 | def generate_confirmation_token(self, user):
data = [str(user.id), self.hash_data(user.email)]
return self.security.confirm_serializer.dumps(data) | Generates a unique confirmation token for the specified user.
:param user: The user to work with |
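The token above is a serializer dump of the user id plus an email hash; a minimal sketch of the same pattern, assuming itsdangerous and a hypothetical hash_data helper (the real security object is not shown).
    import hashlib
    from itsdangerous import URLSafeTimedSerializer

    serializer = URLSafeTimedSerializer("change-me", salt="confirm")  # placeholder secret

    def hash_data(value):
        # Stand-in for the security helper used above.
        return hashlib.sha256(value.encode()).hexdigest()

    def generate_confirmation_token(user_id, email):
        return serializer.dumps([str(user_id), hash_data(email)])

    token = generate_confirmation_token(42, "user@example.com")
    user_id, email_hash = serializer.loads(token, max_age=3600)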
4,391 | def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5,
draw_unit_cell=True, decay=0.2, adsorption_sites=True):
orig_slab = slab.copy()
slab = reorient_z(slab)
orig_cell = slab.lattice.matrix.copy()
if repeat:
slab.make_supercell([repeat, repeat, 1])
coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
sites = sorted(slab.sites, key=lambda x: x.coords[2])
alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
alphas = alphas.clip(min=0)
corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
corner = slab.lattice.get_cartesian_coords(corner)[:2]
verts = orig_cell[:2, :2]
lattsum = verts[0] + verts[1]
for n, coord in enumerate(coords):
r = sites[n].specie.atomic_radius * scale
ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2),
r, color='w', zorder=2 * n))
color = color_dict[sites[n].species_string]
ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r,
facecolor=color, alpha=alphas[n],
edgecolor='k', lw=0.3, zorder=2 * n + 1))
if adsorption_sites:
asf = AdsorbateSiteFinder(orig_slab)
ads_sites = asf.find_adsorption_sites()['all']
sop = get_rot(orig_slab)
ads_sites = [sop.operate(ads_site)[:2].tolist()
for ads_site in ads_sites]
ax.plot(*zip(*ads_sites), color='k', marker='x',
markersize=10, mew=1, linestyle='', zorder=10000)
if draw_unit_cell:
verts = np.insert(verts, 1, lattsum, axis=0).tolist()
verts += [[0., 0.]]
verts = [[0., 0.]] + verts
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
verts = [(np.array(vert) + corner).tolist() for vert in verts]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='white', lw=2,
alpha=0.5, zorder=2 * n + 2)
ax.add_patch(patch)
ax.set_aspect("equal")
center = corner + lattsum / 2.
extent = np.max(lattsum)
lim_array = [center - extent * window, center + extent * window]
x_lim = [ele[0] for ele in lim_array]
y_lim = [ele[1] for ele in lim_array]
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
return ax | Function that helps visualize the slab in a 2-D plot, for
convenient viewing of output of AdsorbateSiteFinder.
Args:
slab (slab): Slab object to be visualized
ax (axes): matplotlib axes with which to visualize
scale (float): radius scaling for sites
repeat (int): number of repeating unit cells to visualize
window (float): window for setting the axes limits, is essentially
a fraction of the unit cell limits
draw_unit_cell (bool): flag indicating whether or not to draw cell
decay (float): how the alpha-value decays along the z-axis |
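A hedged end-to-end sketch for plot_slab, assuming pymatgen's surface and adsorption modules (import paths vary slightly between pymatgen versions); the fcc Cu lattice constant of 3.6 Å is approximate.
    import matplotlib.pyplot as plt
    from pymatgen.core import Lattice, Structure
    from pymatgen.core.surface import generate_all_slabs
    from pymatgen.analysis.adsorption import plot_slab

    bulk = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
    slab = generate_all_slabs(bulk, max_index=1, min_slab_size=8.0, min_vacuum_size=10.0)[0]

    fig, ax = plt.subplots()
    plot_slab(slab, ax, adsorption_sites=True)
    plt.show()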
4,392 | def _setup_cgroups(self, my_cpus, memlimit, memory_nodes, cgroup_values):
logging.debug("Setting up cgroups for run.")
subsystems = [BLKIO, CPUACCT, FREEZER, MEMORY] + self._cgroup_subsystems
if my_cpus is not None or memory_nodes is not None:
subsystems.append(CPUSET)
subsystems = [s for s in subsystems if s in self.cgroups]
cgroups = self.cgroups.create_fresh_child_cgroup(*subsystems)
logging.debug("Created cgroups %s.", cgroups)
for ((subsystem, option), value) in cgroup_values.items():
try:
cgroups.set_value(subsystem, option, value)
except EnvironmentError as e:
cgroups.remove()
sys.exit(
.format(e.strerror, subsystem, option, value, e.errno))
logging.debug(,
subsystem, option, value, cgroups.get_value(subsystem, option))
if my_cpus is not None:
my_cpus_str = ','.join(map(str, my_cpus))
cgroups.set_value(CPUSET, 'cpus', my_cpus_str)
my_cpus_str = cgroups.get_value(CPUSET, 'cpus')
logging.debug(, my_cpus_str)
if memory_nodes is not None:
cgroups.set_value(CPUSET, 'mems', ','.join(map(str, memory_nodes)))
memory_nodesStr = cgroups.get_value(CPUSET, 'mems')
logging.debug(, memory_nodesStr)
if memlimit is not None:
limit = 'limit_in_bytes'
cgroups.set_value(MEMORY, limit, memlimit)
swap_limit = 'memsw.limit_in_bytes'
if not cgroups.has_value(MEMORY, swap_limit):
if systeminfo.has_swap():
sys.exit()
else:
try:
cgroups.set_value(MEMORY, swap_limit, memlimit)
except IOError as e:
if e.errno == errno.ENOTSUP:
sys.exit()
raise e
memlimit = cgroups.get_value(MEMORY, limit)
logging.debug(, memlimit)
if MEMORY in cgroups:
try:
cgroups.set_value(MEMORY, 'swappiness', '0')
except IOError as e:
logging.warning(, e)
return cgroups | This method creates the CGroups for the following execution.
@param my_cpus: None or a list of the CPU cores to use
@param memlimit: None or memory limit in bytes
@param memory_nodes: None or a list of memory nodes of a NUMA system to use
@param cgroup_values: dict of additional values to set
@return cgroups: a map of all the necessary cgroups for the following execution.
Please add the process of the following execution to all those cgroups! |
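For orientation, the cgroup attributes set above correspond to files under /sys/fs/cgroup on a cgroup v1 system; a bare-bones stand-alone sketch (not BenchExec code) that pins CPUs and a memory limit for a fresh cgroup, assuming a v1 hierarchy and sufficient privileges.
    import os

    def setup_cgroup(name, cpus, mem_bytes):
        cpuset = os.path.join("/sys/fs/cgroup/cpuset", name)
        memory = os.path.join("/sys/fs/cgroup/memory", name)
        os.makedirs(cpuset, exist_ok=True)
        os.makedirs(memory, exist_ok=True)
        with open(os.path.join(cpuset, "cpuset.cpus"), "w") as f:
            f.write(",".join(map(str, cpus)))   # e.g. "0,1"
        with open(os.path.join(cpuset, "cpuset.mems"), "w") as f:
            f.write("0")                        # single NUMA node assumed
        with open(os.path.join(memory, "memory.limit_in_bytes"), "w") as f:
            f.write(str(mem_bytes))
        return cpuset, memory

    # setup_cgroup("myrun", [0, 1], 2 * 1024**3)  # needs root and a mounted cgroup v1 hierarchy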
4,393 | def query_extensions(self, extension_query, account_token=None, account_token_header=None):
query_parameters = {}
if account_token is not None:
query_parameters[] = self._serialize.query(, account_token, )
content = self._serialize.body(extension_query, )
response = self._send(http_method=,
location_id=,
version=,
query_parameters=query_parameters,
content=content)
return self._deserialize(, response) | QueryExtensions.
[Preview API]
:param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query:
:param str account_token:
:param String account_token_header: Header to pass the account token
:rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>` |
4,394 | def splits(self):
if not self.__splits_aggregate:
self.__splits_aggregate = SplitsAggregate(self.book)
return self.__splits_aggregate | Splits |
4,395 | def encode(self):
tftpassert(self.filename, "filename required in initial packet")
tftpassert(self.mode, "mode required in initial packet")
filename = self.filename
mode = self.mode
if not isinstance(filename, bytes):
filename = filename.encode()
if not isinstance(self.mode, bytes):
mode = mode.encode()
ptype = None
if self.opcode == 1: ptype = "RRQ"
else: ptype = "WRQ"
log.debug("Encoding %s packet, filename = %s, mode = %s",
ptype, filename, mode)
for key in self.options:
log.debug(" Option %s = %s", key, self.options[key])
fmt = b"!H"
fmt += b"%dsx" % len(filename)
if mode == b"octet":
fmt += b"5sx"
else:
raise AssertionError("Unsupported mode: %s" % mode)
options_list = []
if len(list(self.options.keys())) > 0:
log.debug("there are options to encode")
for key in self.options:
name = key
if not isinstance(name, bytes):
name = name.encode()
options_list.append(name)
fmt += b"%dsx" % len(name)
value = self.options[key]
if isinstance(value, int):
value = str(value)
if not isinstance(value, bytes):
value = value.encode()
options_list.append(value)
fmt += b"%dsx" % len(value)
log.debug("fmt is %s", fmt)
log.debug("options_list is %s", options_list)
log.debug("size of struct is %d", struct.calcsize(fmt))
self.buffer = struct.pack(fmt,
self.opcode,
filename,
mode,
*options_list)
log.debug("buffer is %s", repr(self.buffer))
return self | Encode the packet's buffer from the instance variables. |
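The struct format assembled above yields the standard RRQ/WRQ wire layout; a tiny sketch of the same packing with the struct module, ignoring TFTP options.
    import struct

    def encode_rrq(filename, mode=b"octet", opcode=1):
        # opcode (2 bytes) + filename + NUL + mode + NUL, per RFC 1350.
        if isinstance(filename, str):
            filename = filename.encode()
        fmt = b"!H%dsx%dsx" % (len(filename), len(mode))
        return struct.pack(fmt, opcode, filename, mode)

    print(repr(encode_rrq("hello.txt")))  # b'\x00\x01hello.txt\x00octet\x00'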
4,396 | def iMath(image, operation, *args):
if operation not in _iMathOps:
raise ValueError()
imagedim = image.dimension
outimage = image.clone()
args = [imagedim, outimage, operation, image] + [a for a in args]
processed_args = _int_antsProcessArguments(args)
libfn = utils.get_lib_fn()
libfn(processed_args)
return outimage | Perform various (often mathematical) operations on the input image/s.
Additional parameters should be specific for each operation.
See the the full iMath in ANTs, on which this function is based.
ANTsR function: `iMath`
Arguments
---------
image : ANTsImage
input object, usually antsImage
operation
a string e.g. "GetLargestComponent" ... the special case of "GetOperations"
or "GetOperationsFull" will return a list of operations and brief
description. Some operations may not be valid (WIP), but most are.
*args : non-keyword arguments
additional parameters specific to the operation
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.iMath(img, 'Canny', 1, 5, 12) |
4,397 | def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
matching_uris = [
uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
if not matching_uris:
raise ValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
raise ValidationError(
f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
)
return matching_uris[0] | Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance. |
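The URIs being filtered above are BIP122-style blockchain URIs; a hedged sketch of the same single-match rule with the web3-based chain predicate injected as a plain callable, since its implementation is not shown here.
    from typing import Callable, List

    def single_matching_uri(uris: List[str], matches_chain: Callable[[str], bool]) -> str:
        matching = [uri for uri in uris if matches_chain(uri)]
        if not matching:
            raise ValueError("Package has no matching URIs on chain.")
        if len(matching) != 1:
            raise ValueError(f"Package has too many ({len(matching)}) matching URIs: {matching}.")
        return matching[0]

    uris = ["blockchain://d4e5.../block/0123...", "blockchain://aaaa.../block/4567..."]
    print(single_matching_uri(uris, lambda uri: uri.startswith("blockchain://d4e5")))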
4,398 | def _uniqualize(d):
abcb
pt = copy.deepcopy(d)
seqs_for_del =[]
vset = set({})
for k in pt:
vset.add(pt[k])
tslen = vset.__len__()
freq = {}
for k in pt:
v = pt[k]
if(v in freq):
freq[v] = freq[v] + 1
seqs_for_del.append(k)
else:
freq[v] = 0
npt = {}
for k in pt:
if(k in seqs_for_del):
pass
else:
npt[k] = pt[k]
pt = npt
return(npt) | d = {1:'a',2:'b',3:'c',4:'b'}
_uniqualize(d) |
4,399 | def _onDocstring( self, docstr, line ):
" Memorizes a function/class/module docstring "
if self.objectsStack:
self.objectsStack[ -1 ].docstring = \
Docstring( trim_docstring( docstr ), line )
return
self.docstring = Docstring( trim_docstring( docstr ), line )
return | Memorizes a function/class/module docstring |