Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
384,100 | def _refresh(self, session, stopping=False):
data = session.get(
WORK_UNITS_ + self.work_spec_name, self.key)
if data is None:
raise NoSuchWorkUnitError()
return | Get this task's current state.
This must be called under the registry's lock. It updates
the :attr:`finished` and :attr:`failed` flags and the
:attr:`data` dictionary based on the current state in the
registry.
In the normal case, nothing will change and this function
will return normally. If it turns out that the work unit
is already finished, the state of this object will change
before :exc:`rejester.exceptions.LostLease` is raised.
:param session: locked registry session
:param stopping: don't raise if the work unit is finished
:raises rejester.exceptions.LostLease: if this worker is
no longer doing this work unit |
384,101 | def this_week_day(base_date, weekday):
day_of_week = base_date.weekday()
if day_of_week > weekday:
return next_week_day(base_date, weekday)
start_of_this_week = base_date - timedelta(days=day_of_week + 1)
day = start_of_this_week + timedelta(days=1)
while day.weekday() != weekday:
day = day + timedelta(days=1)
return day | Finds coming weekday |
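A minimal, self-contained sketch of the "coming weekday" idea in the row above (the original also relies on a next_week_day helper that is not shown here; this sketch uses only the day-stepping loop):

from datetime import date, timedelta

def coming_weekday(base_date, weekday):
    # step forward one day at a time until the requested weekday is hit
    day = base_date + timedelta(days=1)
    while day.weekday() != weekday:
        day += timedelta(days=1)
    return day

print(coming_weekday(date(2024, 1, 1), 4))  # 2024-01-05, the Friday after Mon 2024-01-01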
384,102 | def clear_vdp_vsi(self, port_uuid):
try:
LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': self.vdp_vif_map[port_uuid].get('mac'),
'uuid': self.vdp_vif_map[port_uuid].get('uuid')})
del self.vdp_vif_map[port_uuid]
except Exception:
LOG.error("VSI does not exist")
self.clear_oui(port_uuid) | Clears the vNIC-specific info stored for VDP refresh.
:param port_uuid: vNIC UUID |
384,103 | def verify_jwt_in_request_optional():
try:
if request.method not in config.exempt_methods:
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
verify_token_claims(jwt_data)
_load_user(jwt_data[config.identity_claim_key])
except (NoAuthorizationError, InvalidHeaderError):
pass | Optionally check if this request has a valid access token. If an access
token is present in the request, :func:`~flask_jwt_extended.get_jwt_identity`
will return the identity of the access token. If no access token is
present in the request, this simply returns, and
:func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead.
If there is an invalid access token in the request (expired, tampered with,
etc), this will still raise the appropriate exception. |
384,104 | def info(self, category_id, store_view=None, attributes=None):
return self.call(
'catalog_category.info', [category_id, store_view, attributes]
) | Retrieve Category details
:param category_id: ID of category to retrieve
:param store_view: Store view ID or code
:param attributes: Return the fields specified
:return: Dictionary of data |
384,105 | def process_delivery(message, notification):
mail = message['mail']
delivery = message['delivery']
if 'timestamp' in delivery:
delivered_datetime = clean_time(delivery['timestamp'])
else:
delivered_datetime = None
deliveries = []
for eachrecipient in delivery['recipients']:
deliveries += [Delivery.objects.create(
sns_topic=notification['TopicArn'],
sns_messageid=notification['MessageId'],
mail_timestamp=clean_time(mail['timestamp']),
mail_id=mail['messageId'],
mail_from=mail['source'],
address=eachrecipient,
delivered_time=delivered_datetime,
processing_time=int(delivery['processingTimeMillis']),
smtp_response=delivery['smtpResponse']
)]
for eachdelivery in deliveries:
signals.feedback.send(
sender=Delivery,
instance=eachdelivery,
message=message,
notification=notification
)
logger.info(, str(len(deliveries)))
return HttpResponse() | Function to process a delivery notification |
384,106 | def _ns_query(self, session):
return session.query(ORMJob).filter(ORMJob.app == self.app,
ORMJob.namespace == self.namespace) | Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend
during initialization.
Returns: a SQLAlchemy query object |
384,107 | def parse_alert(output):
for x in output.splitlines():
match = ALERT_PATTERN.match(x)
if match:
rec = {: datetime.strptime(match.group(),
),
: int(match.group()),
: int(match.group()),
: int(match.group()),
: match.group(),
: match.group(),
: match.group(),
: match.group(),
}
if match.group():
rec[] = match.group()
yield rec | Parses the supplied output and yields any alerts.
Example alert format:
01/28/14-22:26:04.885446 [**] [1:1917:11] INDICATOR-SCAN UPnP service discover attempt [**] [Classification: Detection of a Network Scan] [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900
:param output: A string containing the output of running snort
:returns: Generator of snort alert dicts |
384,108 | def ping(self):
status, _, body = self._request('GET', self.ping_path())
return (status is not None) and (bytes_to_str(body) == 'OK') | Check server is alive over HTTP |
384,109 | def CompleteHuntIfExpirationTimeReached(hunt_obj):
if (hunt_obj.hunt_state not in [
rdf_hunt_objects.Hunt.HuntState.STOPPED,
rdf_hunt_objects.Hunt.HuntState.COMPLETED
] and hunt_obj.expired):
StopHunt(hunt_obj.hunt_id, reason="Hunt completed.")
data_store.REL_DB.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=hunt_obj.HuntState.COMPLETED)
return data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id)
return hunt_obj | Marks the hunt as complete if it's past its expiry time. |
384,110 | def parse(self, data):
graph = self._init_graph()
for link in data.get_inner_links():
if link.status != libcnml.libcnml.Status.WORKING:
continue
interface_a, interface_b = link.getLinkedInterfaces()
source = interface_a.ipv4
dest = interface_b.ipv4
graph.add_edge(source, dest, weight=1)
return graph | Converts a CNML structure to a NetworkX Graph object
which is then returned. |
def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs):
kwargs['permission_type'] = permission_type
kwargs['email'] = email
url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"}
return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) | This API endpoint allows you to authorize another user to access your model in a read or write capacity.
Before calling authorize, you must first make sure your model has been registered.
Inputs:
email - String: The email of the user you would like to share access with.
permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`.
Users with `write` permissions can add new input examples and train models.
api_key (optional) - String: Your API key, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination.
cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
elsewhere. This allows the API to recognize a request as yours and automatically route it
to the appropriate destination. |
384,112 | def keyEvent(self, key, down=1):
self.transport.write(pack("!BBxxI", 4, down, key)) | For most ordinary keys, the "keysym" is the same as the corresponding ASCII value.
Other common keys are shown in the KEY_ constants. |
384,113 | def roc_curve(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel="Probability of False Detection",
ylabel="Probability of Detection",
title="ROC Curve", ticks=np.arange(0, 1.1, 0.1), dpi=300,
legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5),
label_fontsize=14, title_fontsize=16, tick_fontsize=12):
if legend_params is None:
legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True)
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(ticks, ticks, "k--")
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in b_set])
pod_range = np.percentile(broc_curves[:,0], ci, axis=1)
pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
pofd_poly[np.isnan(pofd_poly)] = 0
plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
roc_data = roc_obj.roc_curve()
plt.plot(roc_data["POFD"], roc_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r])
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close() | Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects.
The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It
features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection
(False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the
ROC curves as well as the parameters of the legend and the title.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
legend_params (None, dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model.
ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png") |
384,114 | def should_expand(self, tag):
return self.indentation is not None and tag and (
not self.previous_indent or (
tag.serializer ==
and tag.subtype.serializer in (, , )
) or (
tag.serializer ==
)
) | Return whether the specified tag should be expanded. |
384,115 | def get_undefined_namespace_names(graph: BELGraph, namespace: str) -> Set[str]:
return {
exc.name
for _, exc, _ in graph.warnings
if isinstance(exc, UndefinedNamespaceWarning) and exc.namespace == namespace
} | Get the names from a namespace that wasn't actually defined.
:return: The set of all names from the undefined namespace |
384,116 | def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
X_feat_valid, X_seq_valid, y_valid,
graph, var, other_var,
early_stop_patience=None,
n_cores=3):
tic = time.time()
n_epochs = self._param["n_epochs"]
print_every = self._param["print_every"]
step_size = self._param["step_size"]
num_steps = n_epochs
print(, n_epochs)
loss_history = []
train_acc_vec = []
valid_acc_vec = []
step_history = []
with tf.Session(graph=graph, config=tf.ConfigProto(
use_per_session_threads=True,
inter_op_parallelism_threads=n_cores,
intra_op_parallelism_threads=n_cores)) as sess:
sess.run(other_var["init"])
best_performance = None
best_performance_epoch = 0
for step in range(n_epochs):
feed_dict = {other_var["tf_X_seq"]: X_seq_train, other_var["tf_y"]: y_train,
other_var["tf_X_feat"]: X_feat_train,
other_var["tf_step_size"]: step_size}
other_var["optimizer"].minimize(sess, feed_dict=feed_dict)
l = sess.run(other_var["loss"], feed_dict=feed_dict)
loss_history.append(l)
if (step % print_every == 0):
train_accuracy = self._accuracy_in_session(sess, other_var,
X_feat_train, X_seq_train, y_train)
valid_accuracy = self._accuracy_in_session(sess, other_var,
X_feat_valid, X_seq_valid, y_valid)
train_acc_vec.append(train_accuracy)
valid_acc_vec.append(valid_accuracy)
step_history.append(step / num_steps)
print( %
(step, l, train_accuracy, valid_accuracy))
if best_performance is None or valid_accuracy <= best_performance:
best_performance = valid_accuracy
best_performance_epoch = step
if early_stop_patience is not None and step > best_performance_epoch + early_stop_patience:
print("Early stopping. best_performance_epoch: %d, best_performance: %f" %
(best_performance_epoch, best_performance))
break
train_accuracy_final = self._accuracy_in_session(sess, other_var,
X_feat_train, X_seq_train, y_train)
valid_accuracy_final = self._accuracy_in_session(sess, other_var,
X_feat_valid, X_seq_valid, y_valid)
print( % valid_accuracy_final)
var_res = self._get_var_res_sess(sess, var)
if self._param["n_splines"] is not None:
self._splines["quasi_X"] = [self._predict_in_session(sess, other_var,
X_feat_train[i:(i + 1)],
X_seq_train[i:(i + 1)],
variable="spline_quasi_X")
for i in range(X_feat_train.shape[0])]
self._splines["quasi_X"] = np.concatenate([x[0][np.newaxis] for x in self._splines["quasi_X"]])
accuracy = {
"loss_history": np.array(loss_history),
"step_history": np.array(step_history),
"train_acc_history": np.array(train_acc_vec),
"val_acc_history": np.array(valid_acc_vec),
"train_acc_final": train_accuracy_final,
"val_acc_final": valid_accuracy_final,
"best_val_acc": best_performance,
"best_val_acc_epoch": best_performance_epoch,
"test_acc_final": None,
"y_test": None,
"y_test_prediction": None,
"id_vec_test": None
}
self._accuracy = accuracy
toc = time.time()
exec_time = toc - tic
self._exec_time = exec_time
print( % exec_time)
return var_res | Train the actual model
Updates weights / variables, computes and returns the training and validation accuracy |
384,117 | def get_cutout(self, resource, resolution, x_range, y_range, z_range, time_range=None, id_list=[], no_cache=None, access_mode=CacheMode.no_cache, **kwargs):
if no_cache is not None:
warnings.warn("The no-cache option has been deprecated and will not be used in future versions of intern.")
warnings.warn("Please from intern.service.boss.volume import CacheMode and use access_mode=CacheMode.[cache,no-cache,raw] instead.")
if no_cache and access_mode != CacheMode.no_cache:
warnings.warn("Both no_cache and access_mode were used, please use access_mode only. As no_cache has been deprecated. ")
warnings.warn("Your request will be made using the default mode no_cache.")
access_mode=CacheMode.no_cache
if no_cache:
access_mode=CacheMode.no_cache
elif no_cache == False:
access_mode=CacheMode.cache
return self._volume.get_cutout(resource, resolution, x_range, y_range, z_range, time_range, id_list, access_mode, **kwargs) | Get a cutout from the volume service.
Note that access_mode=no_cache is desirable when reading large amounts of
data at once. In these cases, the data is not first read into the
cache, but instead, is sent directly from the data store to the
requester.
Args:
resource (intern.resource.boss.resource.ChannelResource | str): Channel or layer Resource. If a
string is provided instead, BossRemote.parse_bossURI is called instead on a URI-formatted
string of the form `bossdb://collection/experiment/channel`.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
id_list (optional [list[int]]): list of object ids to filter the cutout by.
no_cache (optional [boolean or None]): Deprecated way to specify the use of cache to be True or False.
access_mode should be used instead
access_mode (optional [Enum]): Identifies one of three cache access options:
cache = Will check both cache and for dirty keys
no_cache = Will skip cache check but check for dirty keys
raw = Will skip both the cache and dirty keys check
TODO: Add mode to documentation
Returns:
(numpy.array): A 3D or 4D (time) numpy matrix in (time)ZYX order.
Raises:
requests.HTTPError on error. |
384,118 | def label_position(self):
reg_sizes = [(r.size(), r) for r in self.pieces]
reg_sizes.sort()
return reg_sizes[-1][1].label_position() | Find the largest region and position the label in that. |
384,119 | def get_function_for_cognito_trigger(self, trigger):
print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger))
return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger) | Get the associated function to execute for a cognito trigger |
384,120 | def add_job(self, idx):
self.loads[idx] += 1
for lis in (self.targets, self.loads):
lis.append(lis.pop(idx)) | Called after self.targets[idx] just got the job with header.
Override with subclasses. The default ordering is simple LRU.
The default loads are the number of outstanding jobs. |
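A small sketch of the rotate-to-end trick used in add_job above: popping index idx from both lists and appending keeps targets and loads aligned while moving the just-used engine to the back (the engine names here are assumptions for illustration, not the original scheduler state):

targets = ["engine0", "engine1", "engine2"]
loads = [0, 0, 0]

idx = 1                       # engine1 just received a job
loads[idx] += 1
for lis in (targets, loads):  # rotate both lists in lockstep
    lis.append(lis.pop(idx))

print(targets)  # ['engine0', 'engine2', 'engine1']
print(loads)    # [0, 0, 1]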
384,121 | def read(self, source = None, **options):
message = self.read_header(source)
message.data = self.read_data(message.size, message.is_compressed, **options)
return message | Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information |
384,122 | def dbg_print_irsb(self, irsb_addr, project=None):
if project is None:
project = self._project
if project is None:
raise Exception("Dict addr_to_run is empty. " + \
"Give me a project, and I')
statements[i].pp() | Pretty-print an IRSB with whitelist information |
384,123 | def pass_to_pipeline_if_article(
self,
response,
source_domain,
original_url,
rss_title=None
):
if self.helper.heuristics.is_article(response, original_url):
return self.pass_to_pipeline(
response, source_domain, rss_title=rss_title) | Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline |
384,124 | def plot_correlated_groups(self, group=None, n_genes=5, **kwargs):
geneID_groups = self.adata.uns[]
if(group is None):
for i in range(len(geneID_groups)):
self.show_gene_expression(geneID_groups[i][0], **kwargs)
else:
for i in range(n_genes):
self.show_gene_expression(geneID_groups[group][i], **kwargs) | Plots orthogonal expression patterns.
In the default mode, plots orthogonal gene expression patterns. A
specific correlated group of genes can be specified to plot gene
expression patterns within that group.
Parameters
----------
group - int, optional, default None
If specified, display the genes within the desired correlated
group. Otherwise, display the top ranked gene within each distinct
correlated group.
n_genes - int, optional, default 5
The number of top ranked genes to display within a correlated
group if 'group' is specified.
**kwargs -
All keyword arguments in 'show_gene_expression' and 'scatter'
are eligible. |
384,125 | def _should_retry(exc):
if not hasattr(exc, "errors"):
return False
if len(exc.errors) == 0:
return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
reason = exc.errors[0]["reason"]
return reason in _RETRYABLE_REASONS | Predicate for determining when to retry.
We retry if and only if the 'reason' is 'backendError'
or 'rateLimitExceeded'. |
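A hedged, self-contained sketch of the retry predicate above, with stand-ins for the module constants and a fake exception carrying a BigQuery-style errors list (the real google-cloud-bigquery constants and error classes may differ):

_RETRYABLE_REASONS = frozenset(["backendError", "rateLimitExceeded"])  # assumed values
_UNSTRUCTURED_RETRYABLE_TYPES = (ConnectionError,)                     # assumed stand-in

def should_retry(exc):
    # retry structured API errors only for known transient reasons
    if not hasattr(exc, "errors"):
        return False
    if len(exc.errors) == 0:
        return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
    return exc.errors[0]["reason"] in _RETRYABLE_REASONS

class FakeApiError(Exception):
    def __init__(self, errors):
        super().__init__("api error")
        self.errors = errors

print(should_retry(FakeApiError([{"reason": "rateLimitExceeded"}])))  # True
print(should_retry(FakeApiError([{"reason": "notFound"}])))           # False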
384,126 | def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
file_to_upload = None
try:
if contents is not None:
try:
tmpfd, file_to_upload = tempfile.mkstemp()
os.write(tmpfd, contents)
finally:
try:
os.close(tmpfd)
except OSError as exc:
if exc.errno != errno.EBADF:
raise exc
log.debug(, dest_path, kwargs[])
ssh_args = [
]
if local_file is not None:
file_to_upload = local_file
if os.path.isdir(local_file):
ssh_args.append()
if in kwargs:
ssh_args.extend([
,
,
,
,
,
.format(kwargs[])
])
if in kwargs:
ssh_args.append(.format(kwargs[]))
ssh_args.append(__ssh_gateway_arguments(kwargs))
try:
if socket.inet_pton(socket.AF_INET6, kwargs[]):
ipaddr = .format(kwargs[])
else:
ipaddr = kwargs[]
except socket.error:
ipaddr = kwargs[]
if file_to_upload is None:
log.warning(
)
cmd = (
.format(
.join(ssh_args), file_to_upload, kwargs, dest_path, ipaddr
)
)
log.debug(%s\, cmd)
retcode = _exec_ssh_cmd(cmd,
error_msg={0}\,
password_retries=3,
**kwargs)
finally:
if contents is not None:
try:
os.remove(file_to_upload)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise exc
return retcode | Use scp or sftp to copy a file to a server |
384,127 | def iter_multi_items(mapping):
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item | Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures. |
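A usage sketch of the plain-dict branch of iter_multi_items above (the MultiDict branch needs werkzeug; this stand-alone version covers only ordinary dicts):

def iter_plain_multi_items(mapping):
    # expand list/tuple values into repeated (key, value) pairs
    for key, value in mapping.items():
        if isinstance(value, (tuple, list)):
            for item in value:
                yield key, item
        else:
            yield key, value

print(list(iter_plain_multi_items({"tag": ["a", "b"], "page": 1})))
# [('tag', 'a'), ('tag', 'b'), ('page', 1)]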
def save(self, fname, compression='blosc'):
egg = {
'pres': df2list(self.pres),
'rec': df2list(self.rec),
'dist_funcs': self.dist_funcs,
'subjgroup': self.subjgroup,
'subjname': self.subjname,
'listgroup': self.listgroup,
'listname': self.listname,
'date_created': self.date_created,
'meta': self.meta
}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
dd.io.save(fname, egg, compression=compression) | Save method for the Egg object
The data will be saved as a 'egg' file, which is a dictionary containing
the elements of a Egg saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.egg) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save |
384,129 | def build(self, construct):
if lib.EnvBuild(self._env, construct.encode()) != 1:
raise CLIPSError(self._env) | Build a single construct in CLIPS.
The Python equivalent of the CLIPS build command. |
384,130 | def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
params = {: % (self.jfs.username, newpath)}
r = self.jfs.post(self.path,
extra_headers={:},
params=params)
return r | Move folder to a new name, possibly a whole new path |
384,131 | def make_energy_funnel_data(self, cores=1):
if not self.parameter_log:
raise AttributeError(
)
model_cls = self._params[]
gen_tagged = []
for gen, models in enumerate(self.parameter_log):
for model in models:
gen_tagged.append((model[0], model[1], gen))
sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
top_result = sorted_pps[0]
top_result_model = model_cls(*top_result[0])
if (cores == 1) or (sys.platform == ):
energy_rmsd_gen = map(
self.funnel_rebuild,
[(x, top_result_model,
self._params[]) for x in sorted_pps[1:]])
else:
with futures.ProcessPoolExecutor(
max_workers=self._params[]) as executor:
energy_rmsd_gen = executor.map(
self.funnel_rebuild,
[(x, top_result_model, self._params[])
for x in sorted_pps[1:]])
return list(energy_rmsd_gen) | Compares models created during the minimisation to the best model.
Returns
-------
energy_rmsd_gen: [(float, float, int)]
A list of triples containing the BUFF score, RMSD to the
top model and generation of a model generated during the
minimisation. |
384,132 | def precmd(self, line):
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii += 1
line = line.replace("%*", .join(args[1:]))
args = line.split()
if marker >= 0:
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line | Handle alias expansion and ';;' separator. |
384,133 | def getCandScoresMap(self, profile):
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc":
print("ERROR: unsupported election type")
exit()
copelandScores = dict()
for cand in profile.candMap.keys():
copelandScores[cand] = 0.0
preferenceCounts = profile.getPreferenceCounts()
wmgMap = profile.getWmg()
for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
if cand2 in wmgMap[cand1].keys():
if wmgMap[cand1][cand2] > 0:
copelandScores[cand1] += 1.0
elif wmgMap[cand1][cand2] < 0:
copelandScores[cand2] += 1.0
else:
copelandScores[cand1] += self.alpha
copelandScores[cand2] += self.alpha
return copelandScores | Returns a dictionary that associates integer representations of each candidate with their
Copeland score.
:ivar Profile profile: A Profile object that represents an election profile. |
384,134 | def part_sum(x, i=0):
if i == len(x):
yield 0
else:
for s in part_sum(x, i + 1):
yield s
yield s + x[i] | All subsetsums from x[i:]
:param x: table of values
:param int i: index defining suffix of x to be considered
:iterates: over all values, in arbitrary order
:complexity: :math:`O(2^{len(x)-i})` |
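A runnable restatement of part_sum above with a small usage example showing that every subset sum is produced (order is arbitrary, so the output is sorted and de-duplicated here):

def part_sum(x, i=0):
    # yield the sum of every subset of x[i:]
    if i == len(x):
        yield 0
    else:
        for s in part_sum(x, i + 1):
            yield s
            yield s + x[i]

print(sorted(set(part_sum([1, 2, 3]))))  # [0, 1, 2, 3, 4, 5, 6]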
384,135 | def iter_auth_hashes(user, purpose, minutes_valid):
now = timezone.now().replace(microsecond=0, second=0)
for minute in range(minutes_valid + 1):
yield hashlib.sha1(
% (
now - datetime.timedelta(minutes=minute),
user.password,
purpose,
user.pk,
settings.SECRET_KEY,
),
).hexdigest() | Generate auth tokens tied to user and specified purpose.
The hash expires at midnight on the minute of now + minutes_valid, such
that when minutes_valid=1 you get *at least* 1 minute to use the token. |
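The format string fed to hashlib.sha1 was stripped from this dump; below is a hedged sketch of the same time-bucketed token idea with an explicit payload layout that is an assumption, not the original:

import datetime
import hashlib

def iter_time_bucketed_hashes(secret, purpose, minutes_valid, now=None):
    # one hash per minute bucket, so a token stays valid for at least minutes_valid minutes
    now = (now or datetime.datetime.utcnow()).replace(second=0, microsecond=0)
    for minute in range(minutes_valid + 1):
        bucket = now - datetime.timedelta(minutes=minute)
        payload = "%s:%s:%s" % (bucket.isoformat(), secret, purpose)  # assumed layout
        yield hashlib.sha1(payload.encode("utf-8")).hexdigest()

token = next(iter_time_bucketed_hashes("s3cret", "password-reset", minutes_valid=1))
print(len(token))  # 40 (hex digest length of SHA-1)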
384,136 | def remove(self, safe=None):
self._session.remove(self, safe=safe)
self._session.flush() | Removes the document itself from database.
The optional ``safe`` argument is a boolean that specifies if the
remove method should wait for the operation to complete. |
384,137 | def images(self):
sys_command = 'docker images'
sys_output = self.command(sys_command)
image_list = self._images(sys_output)
return image_list | a method to list the local docker images
:return: list of dictionaries with available image fields
[ {
'CREATED': '7 days ago',
'TAG': 'latest',
'IMAGE ID': '2298fbaac143',
'VIRTUAL SIZE': '302.7 MB',
'REPOSITORY': 'test1'
} ] |
384,138 | def start_notebook(self, name, context: dict, fg=False):
assert context
assert type(context) == dict
assert "context_hash" in context
assert type(context["context_hash"]) == int
http_port = self.pick_port()
assert http_port
context = context.copy()
context["http_port"] = http_port
if "websocket_url" not in context:
context["websocket_url"] = "ws://localhost:{port}".format(port=http_port)
if "{port}" in context["websocket_url"]:
context["websocket_url"] = context["websocket_url"].format(port=http_port)
pid = self.get_pid(name)
assert "terminated" not in context
comm.set_context(pid, context)
if fg:
self.exec_notebook_daemon_command(name, "fg", port=http_port)
else:
self.exec_notebook_daemon_command(name, "start", port=http_port) | Start new IPython Notebook daemon.
:param name: The owner of the Notebook will be *name*. He/she gets a new Notebook content folder created where all files are placed.
:param context: Extra context information passed to the started Notebook. This must contain {context_hash:int} parameter used to identify the launch parameters for the notebook |
384,139 | def dafopw(fname):
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.dafopw_c(fname, ctypes.byref(handle))
return handle.value | Open a DAF for subsequent write requests.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafopw_c.html
:param fname: Name of DAF to be opened.
:type fname: str
:return: Handle assigned to DAF.
:rtype: int |
384,140 | def visit_attribute(self, node):
if self._uses_mandatory_method_param(node):
self._accessed.set_accessed(node)
return
if not self.linter.is_message_enabled("protected-access"):
return
self._check_protected_attribute_access(node) | check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods) |
384,141 | def get_logger(name=None, level=logging.DEBUG, stream=None):
logger = logging.getLogger(name)
colored = colorize_logger(logger, stream=stream, level=level)
return colored | returns a colorized logger. This function can be used just like
:py:func:`logging.getLogger` except you can set the level right
away. |
384,142 | def __reorganize_chron_header(line):
d = {}
m = re.split(re_tab_split, line)
if m:
for s in m:
m2 = re.match(re_var_w_units, s)
if m2:
if m2.group(2) is None:
d[m2.group(1)] = ""
else:
d[m2.group(1)] = m2.group(2)
return d | Reorganize the list of variables. If there are units given, log them.
:param str line:
:return dict: key: variable, val: units (optional) |
384,143 | def is_special_string(obj):
import bs4
return isinstance(obj, (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction)) | Is special string. |
384,144 | def _set_row_label(self, value):
"Set the row label format string (empty to hide)"
if not value:
self.wx_obj.SetRowLabelSize(0)
else:
self.wx_obj._table._row_label = value | Set the row label format string (empty to hide) |
384,145 | def get_body(self):
raw_body = yield get_arg(self, 2)
if not self.serializer:
raise tornado.gen.Return(raw_body)
else:
body = self.serializer.deserialize_body(raw_body)
raise tornado.gen.Return(body) | Get the body value from the response.
:return: a future contains the deserialized value of body |
384,146 | def potential_cloud_layer(self, pcp, water, tlow, land_cloud_prob, land_threshold,
water_cloud_prob, water_threshold=0.5):
part1 = (pcp & water & (water_cloud_prob > water_threshold))
part2 = (pcp & ~water & (land_cloud_prob > land_threshold))
temptest = self.tirs1 < (tlow - 35)
if self.sat in [, ]:
saturation = self.blue_saturated | self.green_saturated | self.red_saturated
return part1 | part2 | temptest | saturation
else:
return part1 | part2 | temptest | Final step of determining potential cloud layer
Equation 18 (Zhu and Woodcock, 2012)
Saturation (green or red) test is not in the algorithm
Parameters
----------
pcps: ndarray
potential cloud pixels
water: ndarray
water mask
tirs1: ndarray
tlow: float
low percentile of land temperature
land_cloud_prob: ndarray
probability of cloud over land
land_threshold: float
cutoff for cloud over land
water_cloud_prob: ndarray
probability of cloud over water
water_threshold: float
cutoff for cloud over water
Output
------
ndarray:
potential cloud layer, boolean |
384,147 | def highlight_multiline_block(self, block, start_pattern, end_pattern, state, format):
if self.previousBlockState() == state:
start = 0
extend = 0
else:
start = start_pattern.indexIn(block)
extend = start_pattern.matchedLength()
while start >= 0:
end = end_pattern.indexIn(block, start + extend)
if end >= extend:
length = end - start + extend + end_pattern.matchedLength()
self.setCurrentBlockState(0)
else:
self.setCurrentBlockState(state)
length = block.length() - start + extend
self.setFormat(start, length, format)
start = start_pattern.indexIn(block, start + length)
if self.currentBlockState() == state:
return True
else:
return False | Highlights given multiline text block.
:param block: Text block.
:type block: QString
:param start_pattern: Start regex pattern.
:type start_pattern: QRegExp
:param end_pattern: End regex pattern.
:type end_pattern: QRegExp
:param format: Format.
:type format: QTextCharFormat
:param state: Block state.
:type state: int
:return: Current block matching state.
:rtype: bool |
384,148 | def _set_default_configuration_options(app):
app.config.setdefault(, (,))
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, False)
app.config.setdefault(, None)
app.config.setdefault(, True)
app.config.setdefault(, None)
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, True)
app.config.setdefault(, [, , , ])
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, True)
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, datetime.timedelta(minutes=15))
app.config.setdefault(, datetime.timedelta(days=30))
app.config.setdefault(, )
app.config.setdefault(, None)
app.config.setdefault(, None)
app.config.setdefault(, None)
app.config.setdefault(, False)
app.config.setdefault(, (, ))
app.config.setdefault(, )
app.config.setdefault(, )
app.config.setdefault(, None)
app.config.setdefault(, 0)
app.config.setdefault(, False)
app.config.setdefault(, ) | Sets the default configuration options used by this extension |
384,149 | def get_registration(self, path):
if not self.is_registered(path):
raise NotRegistered("Email template not registered")
return self._registry[path] | Returns registration item for specified path.
If an email template is not registered, this will raise NotRegistered. |
384,150 | def get_pixel(framebuf, x, y):
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
return (framebuf.buf[index] >> offset) & 0x01 | Get the color of a given pixel |
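A small sketch of the vertical-byte framebuffer layout that get_pixel above assumes (MVLSB/SSD1306-style: each byte packs 8 vertically stacked pixels, stride is the width in bytes per band); the FakeFrameBuffer class is an assumption for illustration only:

class FakeFrameBuffer:
    def __init__(self, width):
        self.stride = width
        self.buf = bytearray(width)  # one 8-pixel-tall band

def get_pixel(framebuf, x, y):
    index = (y >> 3) * framebuf.stride + x  # which byte (band row, column)
    offset = y & 0x07                       # which bit inside that byte
    return (framebuf.buf[index] >> offset) & 0x01

fb = FakeFrameBuffer(8)
fb.buf[3] = 0b00000100        # bit 2 set -> pixel at x=3, y=2 is on
print(get_pixel(fb, 3, 2))    # 1
print(get_pixel(fb, 3, 3))    # 0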
384,151 | def create_CreateProcessWarnMultiproc(original_name):
def new_CreateProcess(*args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
warn_multiproc()
return getattr(_subprocess, original_name)(*args)
return new_CreateProcess | CreateProcess(*args, **kwargs) |
384,152 | def length_range(string, minimum, maximum):
int_range(len(string), minimum, maximum)
return string | Requires values' length to be in a certain range.
:param string: Value to validate
:param minimum: Minimum length to accept
:param maximum: Maximum length to accept
:type string: str
:type minimum: int
:type maximum: int |
384,153 | def check_oscntab(oscntab, ccdamp, xsize, ysize, leading, trailing):
tab = Table.read(oscntab)
ccdamp = ccdamp.lower().rstrip()
for row in tab:
if (row[].lower().rstrip() in ccdamp and
row[] == xsize and row[] == ysize and
row[] == leading and row[] == trailing):
return True
return False | Check if the supplied parameters are in the
``OSCNTAB`` reference file.
.. note:: Even if an entry does not exist in ``OSCNTAB``,
as long as the subarray does not have any overscan,
it should not be a problem for CALACS.
.. note:: This function does not check the virtual bias rows.
Parameters
----------
oscntab : str
Path to the ``OSCNTAB`` reference file being checked against.
ccdamp : str
Amplifier(s) used to read out the CCDs.
xsize : int
Number of columns in the readout.
ysize : int
Number of rows in the readout.
leading : int
Number of columns in the bias section ("TRIMX1" to be trimmed off
by ``BLEVCORR``) on the A/C amplifiers side of the CCDs.
trailing : int
Number of columns in the bias section ("TRIMX2" to be trimmed off
by ``BLEVCORR``) on the B/D amplifiers side of the CCDs.
Returns
-------
supported : bool
Result of test if input parameters are in ``OSCNTAB``. |
384,154 | def get_playlists(self, search, start=0, max_items=100):
return self.get_music_service_information('playlists', search, start,
max_items) | Search for playlists.
See get_music_service_information for details on the arguments.
Note:
Un-intuitively this method returns MSAlbumList items. See
note in class doc string for details. |
384,155 | def filter_and_save(raw, symbol_ids, destination_path):
logging.info()
new_hw_ds = []
for el in raw['handwriting_datasets']:
if el[] in symbol_ids:
el[] = symbol_ids[el[]]
el[].formula_id = symbol_ids[el[]]
new_hw_ds.append(el)
raw['handwriting_datasets'] = new_hw_ds
logging.info(, len(new_hw_ds))
pickle.dump(raw, open(destination_path, "wb"), 2) | Parameters
----------
raw : dict
with key 'handwriting_datasets'
symbol_ids : dict
Maps LaTeX to write-math.com id
destination_path : str
Path where the filtered dict 'raw' will be saved |
384,156 | def get_source(self, source_id=None, source_name=None):
if not (bool(source_id) ^ bool(source_name)):
raise ValueError()
if source_id:
try:
source_id = int(source_id)
except (ValueError, TypeError):
raise ValueError(
)
sel = select([self.source], self.source.c.id == source_id).execute()
else:
sel = select([self.source], self.source.c.name == source_name).execute()
result = sel.fetchone()
if not result:
raise ValueError(
.format(
source_id, source_name))
return dict(list(zip(list(sel.keys()), result))) | Returns a dict with keys ['id', 'name', 'description'] or None if
no match. The ``id`` field is guaranteed to be an int that
exists in table source. Requires exactly one of ``source_id``
or ``source_name``. A new source corresponding to
``source_name`` is created if necessary. |
384,157 | def remove_all_timers(self):
with self.lock:
if self.rtimer is not None:
self.rtimer.cancel()
self.timers = {}
self.heap = []
self.rtimer = None
self.expiring = False | Remove all waiting timers and terminate any blocking threads. |
384,158 | def conversations_setTopic(
self, *, channel: str, topic: str, **kwargs
) -> SlackResponse:
kwargs.update({"channel": channel, "topic": topic})
return self.api_call("conversations.setTopic", json=kwargs) | Sets the topic for a conversation.
Args:
channel (str): The channel id. e.g. 'C1234567890'
topic (str): The new topic for the channel. e.g. 'My Topic' |
384,159 | def rm_eltorito(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput()
if self.eltorito_boot_catalog is None:
raise pycdlibexception.PyCdlibInvalidInput()
for brindex, br in enumerate(self.brs):
if br.boot_system_identifier == b'EL TORITO SPECIFICATION'.ljust(32, b'\x00'):
eltorito_index = brindex
break
else:
raise pycdlibexception.PyCdlibInternalError()
del self.brs[eltorito_index]
num_bytes_to_remove = 0
entries_to_remove = [self.eltorito_boot_catalog.initial_entry]
for sec in self.eltorito_boot_catalog.sections:
for entry in sec.section_entries:
entries_to_remove.append(entry)
for entry in entries_to_remove:
if entry.inode is not None:
new_list = []
for linkrec in entry.inode.linked_records:
if id(linkrec) != id(entry):
new_list.append(linkrec)
entry.inode.linked_records = new_list
num_bytes_to_remove += len(self.eltorito_boot_catalog.record())
self.eltorito_boot_catalog = None
self._finish_remove(num_bytes_to_remove, True) | Remove the El Torito boot record (and Boot Catalog) from the ISO.
Parameters:
None.
Returns:
Nothing. |
384,160 | def ping(self, endpoint=):
r = requests.get(self.url() + "/" + endpoint)
return r.status_code | Ping the server to make sure that you can access the base URL.
Arguments:
None
Returns:
`boolean` Successful access of server (or status code) |
384,161 | def respond_to_contact_info(self, message):
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message) | contact info: Show everyone's emergency contact info. |
384,162 | def path(self):
if len(self.heads) == 1:
return _fmt_mfs_path(self.heads.keys()[0], self.heads.values()[0])
else:
return "(" + "|".join(
_fmt_mfs_path(k, v) for (k, v) in self.heads.items()
) + ")" | The path attribute returns a stringified, concise representation of
the MultiFieldSelector. It can be reversed by the ``from_path``
constructor. |
384,163 | def detrend(x, deg=1):
t=range(len(x))
p = np.polyfit(t, x, deg)
residual = x - np.polyval(p, t)
return residual | remove polynomial from data.
used by autocorr_noise_id()
Parameters
----------
x: numpy.array
time-series
deg: int
degree of polynomial to remove from x
Returns
-------
x_detrended: numpy.array
detrended time-series |
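A quick usage check of detrend above: removing a degree-1 polynomial from a purely linear series should leave a residual that is numerically zero.

import numpy as np

def detrend(x, deg=1):
    t = np.arange(len(x))
    p = np.polyfit(t, x, deg)
    return x - np.polyval(p, t)

x = 2.0 * np.arange(10) + 5.0               # pure linear trend
print(np.allclose(detrend(x, deg=1), 0.0))  # True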
384,164 | def _find_executables(self):
if len(self.needs) > 0:
return
for execname, executable in list(self.module.executables.items()):
skip = False
if any([p.direction == "" for p in executable.ordered_parameters]):
msg.warn("Some parameters in {}.{} have no intent".format(self.module.name, execname) +
" specified. Can't wrap that executable.")
skip = True
if not skip:
self.uses.append(execname)
for depmod in executable.search_dependencies():
if depmod not in self.needs:
self.needs.append(depmod) | Finds the list of executables that pass the requirements necessary to have
a wrapper created for them. |
384,165 | def _setup_chassis(self):
self._create_slots(2)
self._slots[0] = self.integrated_adapters[self._chassis]() | Sets up the router with the corresponding chassis
(create slots and insert default adapters). |
384,166 | async def get_chat(self):
if (self._chat is None or getattr(self._chat, 'min', None))\
and await self.get_input_chat():
try:
self._chat =\
await self._client.get_entity(self._input_chat)
except ValueError:
await self._refetch_chat()
return self._chat | Returns `chat`, but will make an API call to find the
chat unless it's already cached. |
384,167 | def unindent(self):
if self.tab_always_indent:
cursor = self.editor.textCursor()
if not cursor.hasSelection():
cursor.select(cursor.LineUnderCursor)
self.unindent_selection(cursor)
else:
super(PyIndenterMode, self).unindent() | Performs an un-indentation |
384,168 | def addContainer(self, query):
self.setUpdatesEnabled(False)
self.blockSignals(True)
container = XOrbQueryContainer(self)
container.setShowBack(self.count() > 0)
container.enterCompoundRequested.connect(self.enterContainer)
container.exitCompoundRequested.connect(self.exitContainer)
self.addWidget(container)
self.setUpdatesEnabled(True)
self.blockSignals(False)
container.setQuery(query)
self.slideInNext()
return container | Creates a new query container widget object and slides it into
the frame.
:return <XOrbQueryContainer> |
384,169 | def dlopen(ffi, *names):
for name in names:
for lib_name in (name, 'lib' + name):
try:
path = ctypes.util.find_library(lib_name)
lib = ffi.dlopen(path or lib_name)
if lib:
return lib
except OSError:
pass
raise OSError("dlopen() failed to load a library: %s" % .join(names)) | Try various names for the same library, for different platforms. |
384,170 | def autocommit(data_access):
if not data_access.autocommit:
data_access.commit()
old_autocommit = data_access.autocommit
data_access.autocommit = True
try:
yield data_access
finally:
data_access.autocommit = old_autocommit | Make statements autocommit.
:param data_access: a DataAccess instance |
384,171 | def match_replace(cls, ops, kwargs):
expr = ProtoExpr(ops, kwargs)
if LOG:
logger = logging.getLogger()
for key, rule in cls._rules.items():
pat, replacement = rule
match_dict = match_pattern(pat, expr)
if match_dict:
try:
replaced = replacement(**match_dict)
if LOG:
logger.debug(
"%sRule %s.%s: (%s, %s) -> %s", (" " * (LEVEL)),
cls.__name__, key, expr.args, expr.kwargs, replaced)
return replaced
except CannotSimplify:
if LOG_NO_MATCH:
logger.debug(
"%sRule %s.%s: no match: CannotSimplify",
(" " * (LEVEL)), cls.__name__, key)
continue
else:
if LOG_NO_MATCH:
logger.debug(
"%sRule %s.%s: no match: %s", (" " * (LEVEL)),
cls.__name__, key, match_dict.reason)
return ops, kwargs | Match and replace a full operand specification to a function that
provides a replacement for the whole expression
or raises a :exc:`.CannotSimplify` exception.
E.g.
First define an operation::
>>> class Invert(Operation):
... _rules = OrderedDict()
... simplifications = [match_replace, ]
Then some _rules::
>>> A = wc("A")
>>> A_float = wc("A", head=float)
>>> Invert_A = pattern(Invert, A)
>>> Invert._rules.update([
... ('r1', (pattern_head(Invert_A), lambda A: A)),
... ('r2', (pattern_head(A_float), lambda A: 1./A)),
... ])
Check rule application::
>>> print(srepr(Invert.create("hallo"))) # matches no rule
Invert('hallo')
>>> Invert.create(Invert("hallo")) # matches first rule
'hallo'
>>> Invert.create(.2) # matches second rule
5.0
A pattern can also have the same wildcard appear twice::
>>> class X(Operation):
... _rules = {
... 'r1': (pattern_head(A, A), lambda A: A),
... }
... simplifications = [match_replace, ]
>>> X.create(1,2)
X(1, 2)
>>> X.create(1,1)
1 |
384,172 | def handle_modifier_up(self, modifier):
_logger.debug("%s released", modifier)
if modifier not in (Key.CAPSLOCK, Key.NUMLOCK):
self.modifiers[modifier] = False | Updates the state of the given modifier key to 'released'. |
384,173 | def estimate(self, X, **params):
return super(TRAM, self).estimate(X, **params) | Parameters
----------
X : tuple of (ttrajs, dtrajs, btrajs)
Simulation trajectories. ttrajs contain the indices of the thermodynamic state, dtrajs
contains the indices of the configurational states and btrajs contain the biases.
ttrajs : list of numpy.ndarray(X_i, dtype=int)
Every element is a trajectory (time series). ttrajs[i][t] is the index of the
thermodynamic state visited in trajectory i at time step t.
dtrajs : list of numpy.ndarray(X_i, dtype=int)
dtrajs[i][t] is the index of the configurational state (Markov state) visited in
trajectory i at time step t.
btrajs : list of numpy.ndarray((X_i, T), dtype=numpy.float64)
For every simulation frame seen in trajectory i and time step t, btrajs[i][t,k] is the
bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at the k'th
Umbrella/Hamiltonian/temperature). |
384,174 | def edge_tuple(self, vertex0_id, vertex1_id):
pw0 = self.__getitem__(vertex0_id)
pw1 = self.__getitem__(vertex1_id)
if not pw0 or not pw1:
return None
if pw0 < pw1:
return (vertex0_id, vertex1_id)
elif pw0 > pw1:
return (vertex1_id, vertex0_id)
else:
return None | To avoid duplicate edges where the vertex ids are reversed,
we maintain that the vertex ids are ordered so that the corresponding
pathway names are alphabetical.
Parameters
-----------
vertex0_id : int
one vertex in the edge
vertex1_id : int
the other vertex in the edge
Returns
-----------
tup(int, int)|None, the edge id or None if the vertices do not
exist in the network or they map to the same pathway (there should not
be any self-loops in the network) |
384,175 | def add(self, key, value):
if not key in self.prefs:
self.prefs[key] = []
self.prefs[key].append(value) | Add an entry to a list preference
Add `value` to the list of entries for the `key` preference. |
384,176 | def interpret(self, config_dict):
value = config_dict.get(self.name)
if value is None:
if self.default is None:
raise RuntimeError( + self.name)
else:
warnings.warn("Using default {!r} for ".format(self.default, self.name),
DeprecationWarning)
if (str != self.value_type) and isinstance(self.default, self.value_type):
return self.default
else:
value = self.default
try:
if str == self.value_type:
return str(value)
if int == self.value_type:
return int(value)
if bool == self.value_type:
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
raise RuntimeError(self.name + " must be True or False")
if float == self.value_type:
return float(value)
if list == self.value_type:
return value.split(" ")
except Exception:
raise RuntimeError("Error interpreting config item with value {!r} and type {}".format(
self.name, value, self.value_type))
raise RuntimeError("Unexpected configuration type: " + repr(self.value_type)) | Converts the config_parser output into the proper type,
supplies defaults if available and needed, and checks for some errors. |
384,177 | def all(user, groupby=, summary=, network=False,
split_week=False, split_day=False, filter_empty=True, attributes=True,
flatten=False):
scalar_type = if groupby is not None else
summary_type = if groupby is not None else
number_of_interactions_in = partial(bc.individual.number_of_interactions, direction='in')
number_of_interactions_in.__name__ = 'number_of_interactions_in'
number_of_interactions_out = partial(bc.individual.number_of_interactions, direction='out')
number_of_interactions_out.__name__ = 'number_of_interactions_out'
functions = [
(bc.individual.active_days, scalar_type),
(bc.individual.number_of_contacts, scalar_type),
(bc.individual.call_duration, summary_type),
(bc.individual.percent_nocturnal, scalar_type),
(bc.individual.percent_initiated_conversations, scalar_type),
(bc.individual.percent_initiated_interactions, scalar_type),
(bc.individual.response_delay_text, summary_type),
(bc.individual.response_rate_text, scalar_type),
(bc.individual.entropy_of_contacts, scalar_type),
(bc.individual.balance_of_contacts, summary_type),
(bc.individual.interactions_per_contact, summary_type),
(bc.individual.interevent_time, summary_type),
(bc.individual.percent_pareto_interactions, scalar_type),
(bc.individual.percent_pareto_durations, scalar_type),
(bc.individual.number_of_interactions, scalar_type),
(number_of_interactions_in, scalar_type),
(number_of_interactions_out, scalar_type),
(bc.spatial.number_of_antennas, scalar_type),
(bc.spatial.entropy_of_antennas, scalar_type),
(bc.spatial.percent_at_home, scalar_type),
(bc.spatial.radius_of_gyration, scalar_type),
(bc.spatial.frequent_antennas, scalar_type),
(bc.spatial.churn_rate, scalar_type)
]
if user.has_recharges:
functions += [
(bc.recharge.amount_recharges, summary_type),
(bc.recharge.interevent_time_recharges, summary_type),
(bc.recharge.percent_pareto_recharges, scalar_type),
(bc.recharge.number_of_recharges, scalar_type),
(bc.recharge.average_balance_recharges, scalar_type)
]
network_functions = [
bc.network.clustering_coefficient_unweighted,
bc.network.clustering_coefficient_weighted,
bc.network.assortativity_attributes,
bc.network.assortativity_indicators
]
groups = list(group_records(user.records, groupby=groupby))
bins_with_data = len(groups)
groups = list(group_records_with_padding(user.records, groupby=groupby))
bins = len(groups)
bins_without_data = bins - bins_with_data
reporting = OrderedDict([
(, user.antennas_path),
(, user.attributes_path),
(, user.attributes_path),
(, bc.__version__),
(, bc.helper.tools.bandicoot_code_signature()),
(, groupby),
(, split_week),
(, split_day),
(, user.start_time and str(user.start_time)),
(, user.end_time and str(user.end_time)),
(, str(user.night_start)),
(, str(user.night_end)),
(, user.weekend),
(, len(user.records)),
(, len(user.antennas)),
(, len(user.recharges)),
(, bins),
(, bins_with_data),
(, bins_without_data),
(, user.has_call),
(, user.has_text),
(, user.has_home),
(, user.has_recharges),
(, user.has_attributes),
(, user.has_network),
(, bc.helper.tools.percent_records_missing_location(user)),
(, bc.helper.tools.antennas_missing_locations(user)),
(, user.percent_outofnetwork_calls),
(, user.percent_outofnetwork_texts),
(, user.percent_outofnetwork_contacts),
(, user.percent_outofnetwork_call_durations),
])
if user.ignored_records is not None:
reporting[] = OrderedDict(user.ignored_records)
returned = OrderedDict([
(, user.name),
(, reporting)
])
for fun, datatype in functions:
try:
metric = fun(user, groupby=groupby, summary=summary,
datatype=datatype, filter_empty=filter_empty,
split_week=split_week, split_day=split_day)
except ValueError:
metric = fun(user, groupby=groupby, datatype=datatype,
split_week=split_week, filter_empty=filter_empty,
split_day=split_day)
returned[fun.__name__] = metric
if network and user.has_network:
for fun in network_functions:
returned[fun.__name__] = fun(user)
if attributes and user.attributes != {}:
returned[] = OrderedDict(user.attributes)
if flatten is True:
return globals()[](returned)
return returned | Returns a dictionary containing all bandicoot indicators for the user,
as well as reporting variables.
Relevant indicators are defined in the 'individual', and 'spatial' modules.
=================================== =======================================================================
Reporting variables Description
=================================== =======================================================================
antennas_path path of the CSV file containing antennas locations
attributes_path directory where attributes were loaded
version bandicoot version
groupby grouping method ('week' or None)
split_week whether or not indicators are also computed for weekday and weekend
split_day whether or not indicators are also computed for day and night
start_time time of the first record
end_time time of the last record
night_start, night_end start and end time to define nights
weekend days used to define the weekend (``[6, 7]`` by default, where 1 is Monday)
bins number of weeks if the record are grouped
has_call whether or not records include calls
has_text whether or not records include texts
has_home whether or not a :meth:`home location <bandicoot.core.User.recompute_home>` has been found
has_network whether or not correspondents were loaded
percent_records_missing_location percentage of records without location
antennas_missing_locations number of antennas missing a location
percent_outofnetwork_calls percentage of calls, received or emitted, made with a correspondent not loaded in the network
percent_outofnetwork_texts percentage of texts with contacts not loaded in the network
percent_outofnetwork_contacts percentage of contacts not loaded in the network
percent_outofnetwork_call_durations percentage of minutes of calls where the contact was not loaded in the network
number_of_records total number of records
number_of_weeks number of weeks with records
=================================== =======================================================================
We also include a last set of reporting variables, for the records ignored
at load-time. Values can be ignored due to missing or inconsistent fields
(e.g., not including a valid 'datetime' value).
.. code-block:: python
{
'all': 0,
'interaction': 0,
'direction': 0,
'correspondent_id': 0,
'datetime': 0,
'call_duration': 0
}
with the total number of records ignored (key ``'all'``), as well as the
number of records with faulty values for each columns. |
384,178 | def comments(self):
if self._comments is None:
self.assert_bind_client()
if self.comment_count > 0:
self._comments = self.bind_client.get_activity_comments(self.id)
else:
self._comments = []
return self._comments | Iterator of :class:`stravalib.model.ActivityComment` objects for this activity. |
384,179 | def fftlog(fEM, time, freq, ftarg):
_, _, q, mu, tcalc, dlnr, kr, rk = ftarg
if mu > 0:
a = -fEM.imag
else:
a = fEM.real
n = a.size
ln2kr = np.log(2.0/kr)
d = np.pi/(n*dlnr)
m = np.arange(1, (n+1)/2)
y = m*d
if q == 0:
zp = special.loggamma((mu + 1)/2.0 + 1j*y)
arg = 2.0*(ln2kr*y + zp.imag)
else:
xp = (mu + 1.0 + q)/2.0
xm = (mu + 1.0 - q)/2.0
zp = special.loggamma(xp + 0j)
zm = special.loggamma(xm + 0j)
amp = np.exp(np.log(2.0)*q + zp.real - zm.real)
arg = zp.imag + zm.imag
argcos1 = amp*np.cos(arg)
zp = special.loggamma(xp + 1j*y)
zm = special.loggamma(xm + 1j*y)
argamp = np.exp(np.log(2.0)*q + zp.real - zm.real)
arg = 2*ln2kr*y + zp.imag + zm.imag
argcos = np.cos(arg)
argsin = np.sin(arg)
jc = np.array((n + 1)/2.0)
j = np.arange(n)+1
a *= np.exp(-(q - 0.5)*(j - jc)*dlnr)
a = fftpack.rfft(a)
m = np.arange(1, n/2, dtype=int)
if q == 0:
ar = a[2*m-1]
ai = a[2*m]
a[2*m-1] = ar*argcos[:-1] - ai*argsin[:-1]
a[2*m] = ar*argsin[:-1] + ai*argcos[:-1]
if np.mod(n, 2) == 0:
ar = argcos[-1]
a[-1] *= ar
else:
ar = a[2*m-1]
ai = a[2*m]
a[2*m-1] = ar*argcos[:-1] - ai*argsin[:-1]
a[2*m] = ar*argsin[:-1] + ai*argcos[:-1]
a[0] *= argcos1
a[2*m-1] *= argamp[:-1]
a[2*m] *= argamp[:-1]
if np.mod(n, 2) == 0:
m = int(n/2)-3
ar = argcos[m-1]*argamp[m-1]
a[-1] *= ar
a = fftpack.irfft(a)
a = a[::-1]*np.exp(-((q + 0.5)*(j - jc)*dlnr + q*np.log(kr) -
np.log(rk)/2.0))
ttEM = iuSpline(np.log(tcalc), a)
tEM = ttEM(np.log(time))
return tEM, True | r"""Fourier Transform using FFTLog.
FFTLog is the logarithmic analogue to the Fast Fourier Transform FFT.
FFTLog was presented in Appendix B of [Hami00]_ and published at
<http://casa.colorado.edu/~ajsh/FFTLog>.
This function uses a simplified version of ``pyfftlog``, which is a
python-version of ``FFTLog``. For more details regarding ``pyfftlog`` see
<https://github.com/prisae/pyfftlog>.
Not the full flexibility of ``FFTLog`` is available here: Only the
logarithmic FFT (``fftl`` in ``FFTLog``), not the Hankel transform (``fht``
in ``FFTLog``). Furthermore, the following parameters are fixed:
- ``kr`` = 1 (initial value)
- ``kropt`` = 1 (silently adjusts ``kr``)
- ``dir`` = 1 (forward)
Furthermore, ``q`` is restricted to -1 <= q <= 1.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD. |
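In practice ``fftlog`` is not called directly; the modelling routines build ``ftarg`` internally. A hedged sketch of selecting FFTLog through the public interface, with purely illustrative model parameters:
.. code-block:: python

    # Hedged sketch: geometry and resistivities are illustrative, not from the source.
    import numpy as np
    import empymod

    time = np.logspace(-2, 1, 30)                # requested times (s)
    resp = empymod.dipole(
        src=[0, 0, 0.01], rec=[6000, 0, 0.01],   # x-directed dipole source/receiver
        depth=0, res=[2e14, 1],                  # air over a 1 Ohm.m half-space
        freqtime=time, signal=0,                 # impulse response in the time domain
        ft='fftlog',                             # use the FFTLog Fourier transform
    )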
384,180 | def update_service(self, stack, service, args):
url = .format(self.host, stack, service)
return self.__post(url, args) | Update a service.
Updates the configuration of the named service (e.g. the container image); the change takes effect once the containers are redeployed.
If the manualUpdate parameter is given, the Deploy Service API must be called separately, with its own parameters, to carry out the deployment; a service in manual-upgrade mode may not perform any other modification operations.
If manualUpdate is not given, the platform completes the deployment automatically.
Args:
- stack: name of the stack the service belongs to
- service: name of the service
- args: request parameters (JSON) describing the service; see http://kirk-docs.qiniu.com/apidocs/
Returns:
A tuple of the form (<result>, <ResponseInfo>)
- result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
- ResponseInfo: the Response information for the request |
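A hedged usage sketch; the client construction and the argument schema below are assumptions based only on the docstring.
.. code-block:: python

    # Hypothetical sketch: client setup and the args schema are assumptions.
    client = QcosClient(auth)                    # hypothetical Kirk/QCOS API client
    args = {
        'spec': {'image': 'index.qiniu.com/library/nginx:1.10'},  # assumed key layout
        'manualUpdate': False,                   # let the platform redeploy automatically
    }
    result, info = client.update_service('mystack', 'web', args)
    if 'error' in result:
        print('update failed:', result['error'])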
384,181 | def insert_instance(self, block):
embed_type = block.get('type', None)
data = block.get('data', {})
serializer = self.serializers.get(embed_type, None)
if serializer is None:
return block
try:
instance_id = serializer.get_id(data)
instance = self.instances[embed_type][instance_id]
data[embed_type] = serializer.serialize(instance)
except:
data[embed_type] = None
block['data'] = data
return block | Insert a fetched instance into embed block. |
384,182 | def radialvelocity(self, rf=, v0=, off=None):
loc = {'type': "radialvelocity",
'refer': rf,
'm0': dq.quantity(v0)}
if is_measure(off):
if not off['type'] == "radialvelocity":
raise TypeError()
loc["offset"] = off
return self.measure(loc, rf) | Defines a radialvelocity measure. It has to specify a reference
code, radialvelocity quantity value (see introduction for the action
on a scalar quantity with either a vector or scalar value, and when
a vector of quantities is given), and optionally it can specify an
offset, which in itself has to be a radialvelocity.
:param rf: reference code string; Allowable reference
codes are: *LSRK LSRD BARY GEO TOPO GALACTO*
Note that additional ones may become available. Check with::
dm.list_codes(dm.radialvelocity())
:param v0: longitude or x as quantity or string
:param off: an optional offset measure of same type |
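A hedged usage sketch with python-casacore; the velocity value is illustrative.
.. code-block:: python

    # Hedged sketch: assumes python-casacore is installed.
    from casacore.measures import measures

    dm = measures()
    rv = dm.radialvelocity('LSRK', '20km/s')     # 20 km/s in the LSRK frame
    print(dm.list_codes(dm.radialvelocity()))    # show the allowed reference codes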
384,183 | def save(self):
if hasattr(self, 'pre_save'):
self.pre_save()
database, collection = self._collection_key.split('.')
self.validate()
_id = current()[database][collection].save(dict(self))
if _id: self._id = _id
if hasattr(self, 'post_save'):
self.post_save() | Save this object to the database. Behaves very similarly to
whatever collection.save(document) would, ie. does upserts on _id
presence. If methods ``pre_save`` or ``post_save`` are defined, those
are called. If there is a spec document, then the document is
validated against it after the ``pre_save`` hook but before the save. |
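A hypothetical sketch of the hook behaviour described above; the ``Document`` base class, field names, and spec machinery are assumptions.
.. code-block:: python

    # Hypothetical sketch: 'Document' stands in for the ODM base class this method lives on.
    from datetime import datetime

    class User(Document):                        # hypothetical subclass
        _collection_key = 'appdb.users'          # "<database>.<collection>"

        def pre_save(self):                      # runs before validation and the save
            self['updated_at'] = datetime.utcnow()

    user = User(name='Ada')
    user.save()                                  # upserts on _id; post_save (if defined) runs afterwards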
384,184 | def _construct_version(self, function, intrinsics_resolver):
code_dict = function.Code
if not code_dict:
raise ValueError("Lambda function code must be a valid non-empty dictionary")
if not intrinsics_resolver:
raise ValueError("intrinsics_resolver is required for versions creation")
lambda_version.Description = self.VersionDescription
return lambda_version | Constructs a Lambda Version resource that will be auto-published when CodeUri of the function changes.
Old versions will not be deleted without a direct reference from the CloudFormation template.
:param model.lambda_.LambdaFunction function: Lambda function object that is being connected to a version
:param model.intrinsics.resolver.IntrinsicsResolver intrinsics_resolver: Class that can help resolve
references to parameters present in CodeUri. It is a common usecase to set S3Key of Code to be a
template parameter. Need to resolve the values otherwise we will never detect a change in Code dict
:return: Lambda function Version resource |
384,185 | def parse_prototype(prototype):
val = .join(prototype.splitlines())
f = match(func_pat, val)
if f is None:
raise Exception(.format(val))
ftp, pointer, name, arg = [v.strip() for v in f.groups()]
args = []
if arg.strip():
for item in split(arg_split_pat, arg):
m = match(variable_pat, item.strip())
if m is None:
raise Exception(.format(val))
tp, star, nm, count = [v.strip() if v else '' for v in m.groups()]
args.append(VariableSpec(tp, star, nm, count))
return FunctionSpec(, ftp, pointer, name, args) | Returns a :attr:`FunctionSpec` instance from the input. |
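A hedged usage sketch; the accepted grammar depends on module-level regexes not shown here, and the attribute names on the result are assumptions.
.. code-block:: python

    # Hedged sketch: the commented results are assumptions about FunctionSpec/VariableSpec.
    spec = parse_prototype('void set_size(int width, int height)')
    # spec.name -> 'set_size'
    # spec.args -> [VariableSpec('int', '', 'width', ''),
    #               VariableSpec('int', '', 'height', '')]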
384,186 | def get_config():
global token
config = configparser.ConfigParser()
config.read(os.path.join(os.path.expanduser('~'), '.config/scdl/scdl.cfg'))
try:
token = config['scdl']['auth_token']
path = config['scdl']['path']
except:
logger.error()
logger.error()
sys.exit()
if os.path.exists(path):
os.chdir(path)
else:
logger.error()
sys.exit() | Reads the music download filepath from scdl.cfg |
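A hedged sketch of writing a matching configuration file; the section name, keys, and path are assumptions inferred from the code above.
.. code-block:: python

    # Hedged sketch: section/key names and the config path are assumptions.
    import configparser
    import os

    cfg = configparser.ConfigParser()
    cfg['scdl'] = {
        'auth_token': '<oauth token>',
        'path': os.path.expanduser('~/Music'),
    }
    with open(os.path.expanduser('~/.config/scdl/scdl.cfg'), 'w') as f:
        cfg.write(f)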
384,187 | def validate_lang(ctx, param, lang):
if ctx.params['nocheck']:
return lang
try:
if lang not in tts_langs():
raise click.UsageError(
" not in list of supported languages.\n"
"Use --all to list languages or "
"add --nocheck to disable language check." % lang)
else:
ctx.params['nocheck'] = True  # language is valid; no need for gTTS to re-check
except RuntimeError as e:
log.debug(str(e), exc_info=True)
return lang | Validation callback for the <lang> option.
Ensures <lang> is a supported language unless the <nocheck> flag is set |
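A hedged sketch of wiring such a callback to a click option; declaring ``--nocheck`` eagerly ensures it is present in ``ctx.params`` when the ``--lang`` callback runs.
.. code-block:: python

    # Hedged sketch: assumes the validate_lang callback above and standard click wiring.
    import click

    @click.command()
    @click.option('--nocheck', is_flag=True, is_eager=True, help='Disable language check.')
    @click.option('--lang', default='en', callback=validate_lang, help='Language to speak in.')
    def tts(nocheck, lang):
        click.echo('Speaking in %s' % lang)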
384,188 | def compute(cls, observation, prediction):
assert isinstance(observation, dict)
try:
    p_value = prediction['mean']        # use the prediction's mean if available
except (TypeError, KeyError):
    try:
        p_value = prediction['value']   # otherwise fall back to its 'value' entry
    except (TypeError, KeyError):
        p_value = prediction            # otherwise assume the prediction is a bare number
o_mean = observation['mean']
o_std = observation['std']
value = (p_value - o_mean)/o_std
value = utils.assert_dimensionless(value)
if np.isnan(value):
    score = InsufficientDataScore()
else:
    score = ZScore(value)
return score | Compute a z-score from an observation and a prediction. |
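A worked example of the underlying arithmetic, leaving out the dimensionless check and the score classes:
.. code-block:: python

    # Minimal arithmetic sketch of the z-score itself, outside the sciunit machinery.
    observation = {'mean': -65.0, 'std': 2.0}
    prediction = {'mean': -60.0}

    z = (prediction['mean'] - observation['mean']) / observation['std']
    print(z)   # 2.5 -> the prediction sits 2.5 standard deviations above the observed mean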
384,189 | def parse_raml(self):
if utils.is_url(self.ramlfile):
raml = utils.download_file(self.ramlfile)
else:
with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
raml = raml_f.read()
loader = ramlfications.loads(raml)
config = ramlfications.setup_config(self.ramlconfig)
self.raml = ramlfications.parse_raml(loader, config) | Parse RAML file |
384,190 | def main(args):
of = sys.stdout
if args.output and args.output[-4:] == :
cmd = +args.output
pof = Popen(cmd.split(),stdin=PIPE)
of = pof.stdin
elif args.output:
of = open(args.output,)
header = None
if args.HQ:
cmd = +args.HQ
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.HQCorrected:
cmd = +args.HQCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.AQ:
cmd = +args.AQ
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.AQCorrected:
cmd = +args.AQCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.subreads:
cmd = +args.subreads
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
if (not header) and args.subreadsCorrected:
cmd = +args.subreadsCorrected
sys.stderr.write(cmd+"\n")
header = Popen(cmd.split(),stdout=PIPE).communicate()[0]
of.write(header)
_nameprog = re.compile()
negative_filter = set()
negative_filter = get_best_set(negative_filter,,of,args,True)
get_best_set(negative_filter,,of,args,False)
if args.output and args.output[-4:] == :
pof.communicate()
else:
of.close() | Use the valid input file to get the header information. |
384,191 | def process_into(self, node, obj):
if isinstance(node, BeautifulSoup.NavigableString):
text = self.process_text(node)
if text:
obj.append(text)
return
if node.name == 'p':
new_obj = document.Paragraph()
obj.append(new_obj)
obj = new_obj
elif node.name == 'ul':
new_obj = document.List()
obj.append(new_obj)
obj = new_obj
elif node.name == 'li':
new_obj = document.ListEntry()
obj.append(new_obj)
obj = new_obj
for child in node:
self.process_into(child, obj) | Process a BeautifulSoup node and fill its elements into a pyth
base object. |
384,192 | def dump_image_data(dataset_dir, data_dir, dataset, color_array_info, root=None, compress=True):
if root is None:
root = {}
root['vtkClass'] = 'vtkImageData'
container = root
container['spacing'] = dataset.GetSpacing()
container['origin'] = dataset.GetOrigin()
container['extent'] = dataset.GetExtent()
dump_all_arrays(dataset_dir, data_dir, dataset, container, compress)
return root | Dump image data object to vtkjs |
384,193 | def afterglow(self, src=None, event=None, dst=None, **kargs):
if src is None:
src = lambda x: x['IP'].src
if event is None:
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
if s in sl:
n, lst = sl[s]
n += 1
if e not in lst:
lst.append(e)
sl[s] = (n, lst)
else:
sl[s] = (1, [e])
if e in el:
n, lst = el[e]
n += 1
if d not in lst:
lst.append(d)
el[e] = (n, lst)
else:
el[e] = (1, [d])
dl[d] = dl.get(d, 0) + 1
except Exception:
continue
import math
def normalize(n):
return 2 + math.log(n) / 4.0
def minmax(x):
m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])),
((a, a) for a in x))
if m == M:
m = 0
if M == 0:
M = 1
return m, M
mins, maxs = minmax(x for x, _ in six.itervalues(sl))
mine, maxe = minmax(x for x, _ in six.itervalues(el))
mind, maxd = minmax(six.itervalues(dl))
gr =
gr += "
for s in sl:
n, _ = sl[s]
n = 1 + float(n - mins) / (maxs - mins)
gr += % (repr(s), repr(s), n, n)
gr += "
for e in el:
n, _ = el[e]
n = n = 1 + float(n - mine) / (maxe - mine)
gr += % (repr(e), repr(e), n, n)
for d in dl:
n = dl[d]
n = n = 1 + float(n - mind) / (maxd - mind)
gr += % (repr(d), repr(d), n, n)
gr += "
for s in sl:
n, lst = sl[s]
for e in lst:
gr += % (repr(s), repr(e))
for e in el:
n, lst = el[e]
for d in lst:
gr += % (repr(e), repr(d))
gr += "}"
return do_graph(gr, **kargs) | Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst |
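A hedged usage sketch; the pcap file name is a placeholder, and rendering requires Graphviz as with scapy's other graphing helpers.
.. code-block:: python

    # Hedged sketch: file name is a placeholder; keyword arguments are passed on to do_graph().
    from scapy.all import rdpcap

    pkts = rdpcap('traffic.pcap')                   # a PacketList
    pkts.afterglow()                                # graph IP.src -> IP.dport -> IP.dst
    pkts.afterglow(event=lambda x: x['TCP'].dport)  # custom "event" extractor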
384,194 | def get_stp_mst_detail_output_cist_migrate_time(self, **kwargs):
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
migrate_time = ET.SubElement(cist, "migrate-time")
migrate_time.text = kwargs.pop('migrate_time')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
384,195 | def get_ccle_mutations():
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode()
body = json.loads(response)
gene_list = body.get('gene_list')
cell_lines = body.get('cell_lines')
mutations = cbio_client.get_ccle_mutations(gene_list, cell_lines)
res = {'mutations': mutations}
return res | Get CCLE mutations
returns the amino acid changes for a given list of genes and cell lines |
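A hedged sketch of calling such an endpoint over HTTP; the base URL and route are assumptions, while the JSON keys mirror the handler above.
.. code-block:: python

    # Hedged sketch: base URL and route are assumptions.
    import requests

    resp = requests.post(
        'http://localhost:8080/ccle_mutations',   # hypothetical route for this handler
        json={'gene_list': ['BRAF', 'KRAS'],
              'cell_lines': ['A375_SKIN', 'A549_LUNG']},
    )
    print(resp.json()['mutations'])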
384,196 | def get_mean(self, col, row):
return javabridge.call(self.jobject, "getMean", "(II)D", col, row) | Returns the mean at this location (if valid location).
:param col: the 0-based column index
:type col: int
:param row: the 0-based row index
:type row: int
:return: the mean
:rtype: float |
384,197 | def gen_textfiles_from_filenames(
filenames: Iterable[str]) -> Generator[TextIO, None, None]:
for filename in filenames:
with open(filename) as f:
yield f | Generates file-like objects from a list of filenames.
Args:
filenames: iterable of filenames
Yields:
each file as a :class:`TextIO` object |
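A short usage sketch with placeholder file names:
.. code-block:: python

    # Usage sketch: each file is opened lazily and closed when the generator moves on.
    for f in gen_textfiles_from_filenames(['a.txt', 'b.txt']):
        for line in f:
            print(line.rstrip())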
384,198 | def subdomain(self, index, value=None):
if value is not None:
subdomains = self.subdomains()
subdomains[index] = value
return URL._mutate(self, host='.'.join(subdomains))
return self.subdomains()[index] | Return a subdomain or set a new value and return a new :class:`URL`
instance.
:param integer index: 0-indexed subdomain
:param string value: New subdomain |
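A hedged usage sketch with the purl-style ``URL`` class this method appears to belong to:
.. code-block:: python

    # Hedged sketch: assumes the purl library's URL class.
    from purl import URL

    u = URL('https://www.example.com/path')
    print(u.subdomain(0))            # 'www'
    print(u.subdomain(0, 'blog'))    # new URL with host 'blog.example.com'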
384,199 | def find_stack_elements(self, module, module_name="", _visited_modules=None):
from types import ModuleType
if _visited_modules is None: _visited_modules = []
_visited_modules.append(module)
elements = []
for el_name in dir(module):
the_el = module.__getattribute__(el_name)
if isinstance(the_el, ModuleType):
if the_el in _visited_modules:
continue
elements = elements + self.find_stack_elements(the_el, module_name + el_name + ".", _visited_modules)
elif isinstance(the_el, StackElement):
elements.append((module_name, el_name, the_el))
return elements | This function goes through the given container and returns the stack elements. Each stack
element is represented by a tuple:
( container_name, element_name, stack_element)
The tuples are returned in an array |
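A hedged usage sketch; ``finder`` stands for whatever object this method is defined on, and ``mypackage`` is a placeholder.
.. code-block:: python

    # Hypothetical sketch: 'finder' and 'mypackage' are placeholders.
    import mypackage

    for container_name, element_name, element in finder.find_stack_elements(mypackage):
        print(container_name + element_name, '->', type(element).__name__)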