Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
384,300 | def create(self, ip_access_control_list_sid):
data = values.of({'IpAccessControlListSid': ip_access_control_list_sid, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return IpAccessControlListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) | Create a new IpAccessControlListMappingInstance
:param unicode ip_access_control_list_sid: The unique id of the IP access control list to map to the SIP domain
:returns: Newly created IpAccessControlListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.ip_access_control_list_mapping.IpAccessControlListMappingInstance |
384,301 | def search(self, entity_type, property_name, search_string, start_index=0, max_results=99999):
params = {
"entity-type": entity_type,
"expand": entity_type,
"property-search-restriction": {
"property": {"name": property_name, "type": "STRING"},
"match-mode": "CONTAINS",
"value": search_string,
}
}
params = {
"entity-type": entity_type,
"expand": entity_type,
"start-index": start_index,
"max-results": max_results
}
root = etree.Element("property-search-restriction")
property_ = etree.Element("property")
prop_name = etree.Element("name")
prop_name.text = property_name
property_.append(prop_name)
prop_type = etree.Element("type")
prop_type.text = "STRING"
property_.append(prop_type)
root.append(property_)
match_mode = etree.Element("match-mode")
match_mode.text = "CONTAINS"
root.append(match_mode)
value = etree.Element("value")
value.text = search_string
root.append(value)
payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(root).decode()
response = self.session.post(self.rest_url + "/search", params=params, data=payload, timeout=self.timeout)
if not response.ok:
return None
return response.json() | Performs a user search using the Crowd search API.
https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource
Args:
entity_type: 'user' or 'group'
property_name: eg. 'email', 'name'
search_string: the string to search for.
start_index: starting index of the results (default: 0)
max_results: maximum number of results returned (default: 99999)
Returns:
json results:
Returns search results. |
384,302 | def get_redirect_target():
for target in request.values.get('next'), request.referrer:
if target and is_local_url(target):
return target | Get URL to redirect to and ensure that it is local. |
384,303 | def filename(self):
self._filename = getattr(self, , None)
self._root_path = getattr(self, , None)
if self._filename is None and self._root_path is None:
return self._filename_global()
else:
return self._filename_projects() | Defines the name of the configuration file to use. |
384,304 | def _find_zone_by_id(self, zone_id):
if not self.zones:
return None
zone = list(filter(
lambda zone: zone.id == zone_id, self.zones))
return zone[0] if zone else None | Return zone by id. |
384,305 | def find_two_letter_edits(word_string):
if word_string is None:
return {}
elif isinstance(word_string, str):
return (e2 for e1 in find_one_letter_edits(word_string) for e2 in find_one_letter_edits(e1))
else:
raise InputError("string or none type variable not passed as argument to find_two_letter_edits") | Finds all possible two letter edits of word_string:
- Splitting word_string into two words at all character locations
- Deleting one letter at all character locations
- Switching neighbouring characters
- Replacing a character with every alphabetical letter
- Inserting all possible alphabetical characters between each character location including boundaries
This can be seen as a reapplication of find_one_letter_edits to all words found via a first
instantiation of find_one_letter_edits on word_string.
Returns all two letter edits as a set instance. |
384,306 | def copy(self, *args, **kwargs):
for slot in self.__slots__:
attr = getattr(self, slot)
if slot[0] == '_':
slot = slot[1:]
if slot not in kwargs:
kwargs[slot] = attr
result = type(self)(*args, **kwargs)
return result | Copy this model element and contained elements if they exist. |
384,307 | def __on_message(self, ws, msg):
msg = json.loads(msg)
logging.debug("ConnectorDB:WS: Msg ", msg["stream"])
stream_key = msg["stream"] + ":"
if "transform" in msg:
stream_key += msg["transform"]
self.subscription_lock.acquire()
if stream_key in self.subscriptions:
subscription_function = self.subscriptions[stream_key]
self.subscription_lock.release()
fresult = subscription_function(msg["stream"], msg["data"])
if fresult is True:
fresult = msg["data"]
if fresult is not False and fresult is not None and msg["stream"].endswith(
"/downlink") and msg["stream"].count("/") == 3:
self.insert(msg["stream"][:-9], fresult)
else:
self.subscription_lock.release()
logging.warn(
"ConnectorDB:WS: Msg not subscribed! Subscriptions: %s",
msg["stream"], list(self.subscriptions.keys())) | This function is called whenever there is a message received from the server |
384,308 | def _attach_params(self, params, **kwargs):
lst = params.to_list() if isinstance(params, ParameterSet) else params
for param in lst:
param._bundle = self
for k, v in kwargs.items():
setattr(param, '_{}'.format(k), v)
self._params.append(param)
self._check_copy_for()
return | Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags) |
384,309 | def _get_position_from_instance(self, instance, ordering):
qs_order = getattr(instance, )
result = super(SequenceCursorPagination, self)._get_position_from_instance(instance, ordering[1:])
return (qs_order, result) | The position will be a tuple of values:
The QuerySet number inside of the QuerySetSequence.
Whatever the normal value taken from the ordering property gives. |
384,310 | def _sort_locations(locations, expand_dir=False):
files = []
urls = []
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls | Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls) |
384,311 | def GetCpuReservationMHz(self):
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | Retrieves the minimum processing power in MHz reserved for the virtual
machine. For information about setting a CPU reservation, see "Limits and
Reservations" on page 14. |
384,312 | def copy(self):
health = self.health, self.health_max
r = self.r, self.r_max
g = self.g, self.g_max
b = self.b, self.b_max
y = self.y, self.y_max
x = self.x, self.x_max
m = self.m, self.m_max
h = self.h, self.h_max
c = self.c, self.c_max
return self.__class__(self.name, health, r, g, b, y, x, m, h, c) | Return a copy of this actor with the same attribute values. |
384,313 | def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]:
if isinstance(hosts, str):
hosts = hosts.replace(',', ' ').split()
return [_to_http_uri(i) for i in hosts] | Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200'] |
384,314 | def upload(self, array, fields=None, table="MyDB", configfile=None):
wsid=''
password=''
if configfile is None:
configfile = "CasJobs.config"
logger.info("Reading config file: %s"%configfile)
lines = open(configfile,'r').readlines()
for line in lines:
k,v = line.strip().split()
if k == 'wsid': wsid = v
if k == 'password': password = v
logger.info("Attempting to drop table: %s"%table)
self.drop(table)
SOAP_TEMPLATE =
logger.info("Writing array...")
s = io.StringIO()
np.savetxt(s,array,delimiter=',',fmt="%.10g")
tb_data = ''
if fields is not None:
tb_data += ','.join(f for f in fields)+'\n'
tb_data += s.getvalue()
message = SOAP_TEMPLATE % (wsid, password, table, tb_data, "false")
webservice = httpcl.HTTP("skyserver.sdss3.org")
webservice.putrequest("POST", "/casjobs/services/jobs.asmx")
webservice.putheader("Host", "skyserver.sdss3.org")
webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
webservice.putheader("Content-length", "%d" % len(message))
webservice.endheaders()
logger.info("Sending SOAP POST message...")
webservice.send(message)
statuscode, statusmessage, header = webservice.getreply()
print("Response: ", statuscode, statusmessage)
print("headers: ", header)
res = webservice.getfile().read()
print(res) | Upload an array to a personal database using SOAP POST protocol.
http://skyserver.sdss3.org/casjobs/services/jobs.asmx?op=UploadData |
384,315 | def local_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size,
shard_files, output_format):
from .prediction import predict as predict_module
if mode == :
model_dir = os.path.join(training_dir, )
elif mode == :
model_dir = os.path.join(training_dir, )
else:
raise ValueError()
if not file_io.file_exists(model_dir):
raise ValueError( % model_dir)
cmd = [,
% prediction_input_file,
% model_dir,
% output_dir,
% output_format,
% str(batch_size),
if shard_files else ,
if mode == else
]
return predict_module.main(cmd) | See batch_predict |
384,316 | def _handle_final_metric_data(self, data):
id_ = data['parameter_id']
value = data['value']
if id_ in _customized_parameter_ids:
self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value)
else:
self.tuner.receive_trial_result(id_, _trial_params[id_], value) | Call tuner to process final results |
384,317 | def append(self, header, f, _left=False):
self.items_length += len(header)
if _left:
self.deque.appendleft((header, f))
else:
self.deque.append((header, f)) | Add a column to the table.
Args:
header (str):
Column header
f (function(datum)->str):
Makes the row string from the datum. Str returned by f should
have the same width as header. |
384,318 | def SpiceUDREPU(f):
@functools.wraps(f)
def wrapping_udrepu(beg, end, et):
f(beg, end, et)
return UDREPU(wrapping_udrepu) | Decorator for wrapping python functions in spice udrepu callback type
:param f: function to be wrapped
:type f: builtins.function
:return: wrapped udrepu function
:rtype: builtins.function |
384,319 | def plot_slippage_sensitivity(returns, positions, transactions,
ax=None, **kwargs):
if ax is None:
ax = plt.gca()
avg_returns_given_slippage = pd.Series()
for bps in range(1, 100):
adj_returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, bps)
avg_returns = ep.annual_return(adj_returns)
avg_returns_given_slippage.loc[bps] = avg_returns
avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)
ax.set_title('Average annual returns given additional per-dollar slippage')
ax.set_xticks(np.arange(0, 100, 10))
ax.set_ylabel('Average annual return')
ax.set_xlabel('Per-dollar slippage (bps)')
return ax | Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. |
384,320 | def validate(self):
assert self.path, "{} must have a path".format(self.__class__.__name__)
ext = extract_path_ext(self.path, default_ext=self.subtitlesformat)
if ext not in self.allowed_formats and ext not in CONVERTIBLE_FORMATS[self.get_preset()]:
raise ValueError(.format(ext, self.path)) | Ensure `self.path` has one of the extensions in `self.allowed_formats`. |
384,321 | def dictionary(self) -> dict:
self.config.read(self.filepath)
return self.config._sections | Get a python dictionary of contents. |
384,322 | def download_song_by_id(self, song_id, song_name, folder='.'):
try:
url = self.crawler.get_song_url(song_id)
if self.lyric:
lyric_info = self.crawler.get_song_lyric(song_id)
else:
lyric_info = None
song_name = song_name.replace('/', '')
song_name = song_name.replace('.', '')
self.crawler.get_song_by_url(url, song_name, folder, lyric_info)
except RequestException as exception:
click.echo(exception) | Download a song by id and save it to disk.
:params song_id: song id.
:params song_name: song name.
:params folder: storage path. |
384,323 | def licenses_configured(name, licenses=None):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if not licenses:
raise salt.exceptions.ArgumentValueError()
cluster_name, datacenter_name = \
__salt__[]()[], \
__salt__[]()[]
display_name = .format(datacenter_name, cluster_name)
log.info(%s\, display_name)
log.trace(, licenses)
entity = {: ,
: datacenter_name,
: cluster_name}
log.trace(, entity)
comments = []
changes = {}
old_licenses = []
new_licenses = []
has_errors = False
needs_changes = False
try:
log.trace()
schema = LicenseSchema.serialize()
try:
jsonschema.validate({: licenses}, schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidLicenseError(exc)
si = __salt__[]()
existing_licenses = __salt__[](
service_instance=si)
remaining_licenses = existing_licenses[:]
for license_name, license in licenses.items():
filtered_licenses = [l for l in existing_licenses
if l[] == license]
if not filtered_licenses:
comments.append({1}\
{2}\
.format(name, license_name, display_name))
log.info(comments[-1])
continue
else:
try:
existing_license = __salt__[](
key=license, description=license_name,
service_instance=si)
except salt.exceptions.VMwareApiError as ex:
comments.append(ex.err_msg)
log.error(comments[-1])
has_errors = True
continue
comments.append({0}\
.format(license_name))
log.info(comments[-1])
else:
comments.append({0}\
.format(license_name))
log.info(comments[-1])
existing_license = filtered_licenses[0]
log.trace()
assigned_licenses = __salt__[](
entity=entity,
entity_display_name=display_name,
service_instance=si)
already_assigned_license = assigned_licenses[0] if \
assigned_licenses else None
if already_assigned_license and \
already_assigned_license[] == license:
comments.append({0}\
{1}\
.format(license_name, display_name))
log.info(comments[-1])
continue
needs_changes = True
if existing_license[] <= existing_license[]:
comments.append({0}\
{1}\
.format(license_name, display_name))
log.error(comments[-1])
has_errors = True
continue
if __opts__['test']:
comments.append({1}\
{2}\.format(
name, license_name, display_name))
log.info(comments[-1])
else:
try:
__salt__[](
license_key=license,
license_name=license_name,
entity=entity,
entity_display_name=display_name,
service_instance=si)
except salt.exceptions.VMwareApiError as ex:
comments.append(ex.err_msg)
log.error(comments[-1])
has_errors = True
continue
comments.append({0}\{1}\
.format(license_name, display_name))
log.info(comments[-1])
assigned_license = __salt__[](
entity=entity,
entity_display_name=display_name,
service_instance=si)[0]
assigned_license[] =
if already_assigned_license:
already_assigned_license[] =
if already_assigned_license and \
already_assigned_license[] == sys.maxsize:
already_assigned_license[] =
changes[license_name] = {: assigned_license,
: already_assigned_license}
continue
__salt__[](si)
ret.update({'result': True if (not needs_changes) else None if
__opts__['test'] else False if has_errors else True,
'comment': '\n'.join(comments),
'changes': changes if not __opts__['test'] else {}})
return ret
except salt.exceptions.CommandExecutionError as exc:
log.exception()
if si:
__salt__[](si)
ret.update({
'result': False,
'comment': exc.strerror})
return ret | Configures licenses on the cluster entity
Checks if each license exists on the server:
- if it doesn't, it creates it
Check if license is assigned to the cluster:
- if it's not assigned to the cluster:
- assign it to the cluster if there is space
- error if there's no space
- if it's assigned to the cluster nothing needs to be done |
384,324 | def _execute_wk(*args, input=None):
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE) | Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr |
384,325 | def path(self):
p = os.path.normpath(self._path)
if p.endswith():
p = p + os.path.sep
return p | Return path
:returns: path
:rtype: str
:raises: None |
384,326 | def ssh_reachable(self, tries=None, propagate_fail=True):
if not self.running():
return False
try:
ssh.get_ssh_client(
ip_addr=self.ip(),
host_name=self.name(),
ssh_tries=tries,
propagate_fail=propagate_fail,
ssh_key=self.virt_env.prefix.paths.ssh_id_rsa(),
username=self._spec.get('ssh-user'),
password=self._spec.get('ssh-password'),
)
except ssh.LagoSSHTimeoutException:
return False
return True | Check if the VM is reachable with ssh
Args:
tries(int): Number of tries to try connecting to the host
propagate_fail(bool): If set to true, this event will appear
in the log and fail the outer stage. Otherwise, it will be
discarded.
Returns:
bool: True if the VM is reachable. |
384,327 | def clear(self):
self.__cancel_timer()
self.__timer = None
self.__timer_args = None
self.__still_valid = False
self._value = None
super(TemporalDependency, self).clear() | Cleans up the manager. The manager can't be used after this method has
been called |
384,328 | def next_k_array(a):
k = len(a)
if k == 1 or a[0] + 1 < a[1]:
a[0] += 1
return a
a[0] = 0
i = 1
x = a[i] + 1
while i < k-1 and x == a[i+1]:
i += 1
a[i-1] = i - 1
x = a[i] + 1
a[i] = x
return a | Given an array `a` of k distinct nonnegative integers, sorted in
ascending order, return the next k-array in the lexicographic
ordering of the descending sequences of the elements [1]_. `a` is
modified in place.
Parameters
----------
a : ndarray(int, ndim=1)
Array of length k.
Returns
-------
a : ndarray(int, ndim=1)
View of `a`.
Examples
--------
Enumerate all the subsets with k elements of the set {0, ..., n-1}.
>>> n, k = 4, 2
>>> a = np.arange(k)
>>> while a[-1] < n:
... print(a)
... a = next_k_array(a)
...
[0 1]
[0 2]
[1 2]
[0 3]
[1 3]
[2 3]
References
----------
.. [1] `Combinatorial number system
<https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
Wikipedia. |
384,329 | def autocorrelate(data, unbias=2, normalize=2):
coefficients = correlate(data, data, 'full')
size = np.int(coefficients.size/2)
coefficients = coefficients[size:]
N = coefficients.size
if unbias:
if unbias == 1:
coefficients /= (N - np.arange(N))
elif unbias == 2:
coefficient_ratio = coefficients[0]/coefficients[-1]
coefficients /= np.linspace(coefficient_ratio, 1, N)
else:
raise IOError("unbias should be set to 1, 2, or None")
if normalize:
if normalize == 1:
coefficients /= np.abs(coefficients[0])
elif normalize == 2:
coefficients /= np.max(np.abs(coefficients))
else:
raise IOError("normalize should be set to 1, 2, or None")
return coefficients, N | Compute the autocorrelation coefficients for time series data.
Here we use scipy.signal.correlate, but the results are the same as in
Yang, et al., 2012 for unbias=1:
"The autocorrelation coefficient refers to the correlation of a time
series with its own past or future values. iGAIT uses unbiased
autocorrelation coefficients of acceleration data to scale the regularity
and symmetry of gait.
The autocorrelation coefficients are divided by :math:`fc(0)`,
so that the autocorrelation coefficient is equal to :math:`1` when :math:`t=0`:
.. math::
NFC(t) = \\frac{fc(t)}{fc(0)}
Here :math:`NFC(t)` is the normalised autocorrelation coefficient, and :math:`fc(t)` are
autocorrelation coefficients."
:param data: time series data
:type data: numpy array
:param unbias: autocorrelation, divide by range (1) or by weighted range (2)
:type unbias: integer or None
:param normalize: divide by 1st coefficient (1) or by maximum abs. value (2)
:type normalize: integer or None
:return coefficients: autocorrelation coefficients [normalized, unbiased]
:rtype coefficients: numpy array
:return N: number of coefficients
:rtype N: integer
:Examples:
>>> import numpy as np
>>> from mhealthx.signals import autocorrelate
>>> data = np.random.random(100)
>>> unbias = 2
>>> normalize = 2
>>> plot_test = True
>>> coefficients, N = autocorrelate(data, unbias, normalize, plot_test) |
384,330 | def chord(ref, est, **kwargs):
namespace = 'chord'
ref = coerce_annotation(ref, namespace)
est = coerce_annotation(est, namespace)
ref_interval, ref_value = ref.to_interval_values()
est_interval, est_value = est.to_interval_values()
return mir_eval.chord.evaluate(ref_interval, ref_value,
est_interval, est_value, **kwargs) | r'''Chord evaluation
Parameters
----------
ref : jams.Annotation
Reference annotation object
est : jams.Annotation
Estimated annotation object
kwargs
Additional keyword arguments
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
See Also
--------
mir_eval.chord.evaluate
Examples
--------
>>> # Load in the JAMS objects
>>> ref_jam = jams.load('reference.jams')
>>> est_jam = jams.load('estimated.jams')
>>> # Select the first relevant annotations
>>> ref_ann = ref_jam.search(namespace='chord')[0]
>>> est_ann = est_jam.search(namespace='chord')[0]
>>> scores = jams.eval.chord(ref_ann, est_ann) |
384,331 | def _attach_to_model(self, model):
if not issubclass(model, ModelWithDynamicFieldMixin):
raise ImplementationError(
% (
model.__name__, self.name))
super(DynamicFieldMixin, self)._attach_to_model(model)
if self.dynamic_version_of is not None:
return
if hasattr(model, self.name):
return
setattr(model, self.name, self) | Check that the model can handle dynamic fields |
384,332 | def _revert_caffe2_pad(attr):
if len(attr) == 4:
attr = attr[:2]
elif len(attr) == 2:
pass
else:
raise ValueError("Invalid caffe2 type padding: {}".format(attr))
return attr | Removing extra padding from Caffe2. |
384,333 | def paint( self, painter, option, widget ):
if ( self._rebuildRequired ):
self.rebuild()
painter.setPen(self.borderColor())
if ( self.isSelected() ):
painter.setBrush(self.highlightColor())
else:
painter.setBrush(self.fillColor())
hints = painter.renderHints()
if ( not self.isAllDay() ):
painter.setRenderHint(painter.Antialiasing)
pen = painter.pen()
pen.setWidthF(0.25)
painter.setPen(pen)
painter.drawPath(self.path())
title = self.title()
painter.setPen(self.textColor())
for data in self._textData:
painter.drawText(*data)
painter.setRenderHints(hints) | Paints this item on the painter.
:param painter | <QPainter>
option | <QStyleOptionGraphicsItem>
widget | <QWidget> |
384,334 | def list_experiments(project_path,
sort=None,
output=None,
filter_op=None,
info_keys=None):
_check_tabulate()
base, experiment_folders, _ = next(os.walk(project_path))
experiment_data_collection = []
for experiment_dir in experiment_folders:
experiment_state = _get_experiment_state(
os.path.join(base, experiment_dir))
if not experiment_state:
logger.debug("No experiment state found in %s", experiment_dir)
continue
checkpoints = pd.DataFrame(experiment_state["checkpoints"])
runner_data = experiment_state["runner_data"]
time_values = {
"start_time": runner_data.get("_start_time"),
"last_updated": experiment_state.get("timestamp"),
}
formatted_time_values = {
key: datetime.fromtimestamp(val).strftime(TIMESTAMP_FORMAT)
if val else None
for key, val in time_values.items()
}
experiment_data = {
"name": experiment_dir,
"total_trials": checkpoints.shape[0],
"running_trials": (checkpoints["status"] == Trial.RUNNING).sum(),
"terminated_trials": (
checkpoints["status"] == Trial.TERMINATED).sum(),
"error_trials": (checkpoints["status"] == Trial.ERROR).sum(),
}
experiment_data.update(formatted_time_values)
experiment_data_collection.append(experiment_data)
if not experiment_data_collection:
print("No experiments found!")
sys.exit(0)
info_df = pd.DataFrame(experiment_data_collection)
if not info_keys:
info_keys = DEFAULT_PROJECT_INFO_KEYS
col_keys = [k for k in list(info_keys) if k in info_df]
if not col_keys:
print("None of keys {} in experiment data!".format(info_keys))
sys.exit(0)
info_df = info_df[col_keys]
if filter_op:
col, op, val = filter_op.split(" ")
col_type = info_df[col].dtype
if is_numeric_dtype(col_type):
val = float(val)
elif is_string_dtype(col_type):
val = str(val)
else:
raise ValueError("Unsupported dtype for \"{}\": {}".format(
val, col_type))
op = OPERATORS[op]
filtered_index = op(info_df[col], val)
info_df = info_df[filtered_index]
if sort:
if sort not in info_df:
raise KeyError("Sort Index \"{}\" not in: {}".format(
sort, list(info_df)))
info_df = info_df.sort_values(by=sort)
print_format_output(info_df)
if output:
file_extension = os.path.splitext(output)[1].lower()
if file_extension in (".p", ".pkl", ".pickle"):
info_df.to_pickle(output)
elif file_extension == ".csv":
info_df.to_csv(output, index=False)
else:
raise ValueError("Unsupported filetype: {}".format(output))
print("Output saved at:", output) | Lists experiments in the directory subtree.
Args:
project_path (str): Directory where experiments are located.
Corresponds to Experiment.local_dir.
sort (str): Key to sort by.
output (str): Name of file where output is saved.
filter_op (str): Filter operation in the format
"<column> <operator> <value>".
info_keys (list): Keys that are displayed. |
384,335 | def skip_if_empty(func):
@partial_safe_wraps(func)
def inner(value, *args, **kwargs):
if value is EMPTY:
return
else:
return func(value, *args, **kwargs)
return inner | Decorator for validation functions which makes them pass if the value
passed in is the EMPTY sentinal value. |
384,336 | def _get_containers(self):
infos = self.native_conn.list_containers_info()
return [self.cont_cls(self, i['name'], i['count'], i['bytes'])
for i in infos] | Return available containers. |
384,337 | def _sort(self, short_list, sorts):
sort_values = self._index_columns(sorts)
output = []
def _sort_more(short_list, i, sorts):
if len(sorts) == 0:
output.extend(short_list)
sort = sorts[0]
index = self._index[sort_values[i]]
if sort.sort == 1:
sorted_keys = sorted(index.keys())
elif sort.sort == -1:
sorted_keys = reversed(sorted(index.keys()))
else:
sorted_keys = list(index.keys())
for k in sorted_keys:
self._sort(index[k] & short_list, i + 1, sorts[1:])
_sort_more(short_list, 0, sorts)
return output | TAKE SHORTLIST, RETURN IT SORTED
:param short_list:
:param sorts: LIST OF SORTS TO PERFORM
:return: |
384,338 | def bls_snr(blsdict,
times,
mags,
errs,
assumeserialbls=False,
magsarefluxes=False,
sigclip=10.0,
npeaks=None,
perioddeltapercent=10,
ingressdurationfraction=0.1,
verbose=True):
bestperiodbestlspvalnbestpeaksnbestlspvalsnbestperiodslspvalsfrequenciesperiodsblsresultstepsizenfreqnphasebinsmintransitdurationmaxtransitdurationmethodblskwargsnbestperiodss None, then this will calculate the SNR for all
of them. If itnbestperiodsnpeaks: the number of periodogram peaks requested to get SNR for,
: list of refit best periods for each requested peak,
: list of refit epochs (i.e. mid-transit times),
:list of SNRs of the transit for each requested peak,
:list of depths of the transits,
:list of durations of the transits,
:the input value of nphasebins,
:the phase bin containing transit ingress,
:the phase bin containing transit egress,
:the full BLS models used along with its parameters,
:BLS models - phased light curves,
:the phase light curves,
: the phase values}
nbestperiodsnpeaks not specified or invalid, getting SNR for all %s BLS peaksnbestperiodsnbestperiodsnbestperiodskwargsverbosestartpendpsigclipblsresulttransdepthblsresulttransdurationbestperiodblsresulttransingressbinblsresulttransegressbinkwargsnphasebinssnrtransitdepthtransitdurationtransingressbintransegressbinnphasebinsperiodepochsubtractedmagsphasedmagsphasesblsmodelt enough points in the mag series, bail out
else:
LOGERROR()
nbestsnrs = None
transitdepth, transitduration = None, None
nphasebins, transingressbin, transegressbin = None, None, None
allsubtractedmags, allphases, allphasedmags = None, None, None
return {'npeaks':npeaks,
'period':refitperiods,
'epoch':refitepochs,
'snr':nbestsnrs,
'transitdepth':transitdepth,
'transitduration':transitduration,
'nphasebins':nphasebins,
'transingressbin':transingressbin,
'transegressbin':transegressbin,
'allblsmodels':allblsmodels,
'allsubtractedmags':allsubtractedmags,
'allphasedmags':allphasedmags,
'allphases':allphases} | Calculates the signal to noise ratio for each best peak in the BLS
:allphases} | Calculates the signal to noise ratio for each best peak in the BLS
periodogram, along with transit depth, duration, and refit period and epoch.
The following equation is used for SNR::
SNR = (transit model depth / RMS of LC with transit model subtracted)
* sqrt(number of points in transit)
Parameters
----------
blsdict : dict
This is an lspinfo dict produced by either `bls_parallel_pfind` or
`bls_serial_pfind` in this module, or by your own BLS function. If you
provide results in a dict from an external BLS function, make sure this
matches the form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'frequencies': the full array of frequencies considered,
'periods': the full array of periods considered,
'blsresult': list of result dicts from eebls.f wrapper functions,
'stepsize': the actual stepsize used,
'nfreq': the actual nfreq used,
'nphasebins': the actual nphasebins used,
'mintransitduration': the input mintransitduration,
'maxtransitduration': the input maxtransitdurations,
'method':'bls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
times,mags,errs : np.array
These contain the magnitude/flux time-series and any associated errors.
assumeserialbls : bool
If this is True, this function will not rerun BLS around each best peak
in the input lspinfo dict to refit the periods and epochs. This is
usually required for `bls_parallel_pfind` so set this to False if you use
results from that function. The parallel method breaks up the frequency
space into chunks for speed, and the results may not exactly match those
from a regular BLS run.
magsarefluxes : bool
Set to True if the input measurements in `mags` are actually fluxes and
not magnitudes.
npeaks : int or None
This controls how many of the periods in `blsdict['nbestperiods']` to
find the SNR for. If it's None, then this will calculate the SNR for all
of them. If it's an integer between 1 and
`len(blsdict['nbestperiods'])`, will calculate for only the specified
number of peak periods, starting from the best period.
perioddeltapercent : float
The fraction of the period provided to use to search around this
value. This is a percentage. The period range searched will then be::
[period - (perioddeltapercent/100.0)*period,
period + (perioddeltapercent/100.0)*period]
ingressdurationfraction : float
The fraction of the transit duration to use to generate an initial value
of the transit ingress duration for the BLS model refit. This will be
fit by this function.
verbose : bool
If True, will indicate progress and any problems encountered.
Returns
-------
dict
A dict of the following form is returned::
{'npeaks: the number of periodogram peaks requested to get SNR for,
'period': list of refit best periods for each requested peak,
'epoch': list of refit epochs (i.e. mid-transit times),
'snr':list of SNRs of the transit for each requested peak,
'transitdepth':list of depths of the transits,
'transitduration':list of durations of the transits,
'nphasebins':the input value of nphasebins,
'transingressbin':the phase bin containing transit ingress,
'transegressbin':the phase bin containing transit egress,
'allblsmodels':the full BLS models used along with its parameters,
'allsubtractedmags':BLS models - phased light curves,
'allphasedmags':the phase light curves,
'allphases': the phase values} |
384,339 | def setup_exchanges(app):
with app.producer_or_acquire() as P:
for q in app.amqp.queues.values():
P.maybe_declare(q) | Setup result exchange to route all tasks to platform queue. |
384,340 | def get_base_wrappers(method='GET', template_name='', predicates=(), wrappers=()):
wrappers += (preserve_view(MethodPredicate(method), *predicates),)
if template_name:
wrappers += (render_template(template_name),)
return wrappers | basic View Wrappers used by view_config. |
384,341 | def connect(self):
try:
settings = configparser.ConfigParser()
settings._interpolation = configparser.ExtendedInterpolation()
except Exception as err:
self.logger.error("Failed to instantiate config parser exception: %s" % err)
raise
try:
settings.read(self.__config__)
except Exception as err:
self.logger.error("Failed to read config file exception: %s" % err)
raise
symphony_p12 = settings.get(, )
symphony_pwd = settings.get(, )
symphony_pod_uri = settings.get(, )
symphony_keymanager_uri = settings.get(, )
symphony_agent_uri = settings.get(, )
symphony_sessionauth_uri = settings.get(, )
symphony_sid = settings.get(, )
crypt = symphony.Crypt(symphony_p12, symphony_pwd)
symphony_crt, symphony_key = crypt.p12parse()
try:
auth = symphony.Auth(symphony_sessionauth_uri, symphony_keymanager_uri, symphony_crt, symphony_key)
session_token = auth.get_session_token()
self.logger.info("AUTH ( session token ): %s" % session_token)
keymngr_token = auth.get_keymanager_token()
self.logger.info("AUTH ( key manager token ): %s" % keymngr_token)
agent = symphony.Agent(symphony_agent_uri, session_token, keymngr_token)
pod = symphony.Pod(symphony_pod_uri, session_token, keymngr_token)
self.logger.info("INSTANTIATION ( all objects successful)")
except Exception as err:
self.logger.error("Failed to authenticate and initialize: %s" % err)
raise
return agent, pod, symphony_sid | instantiate objects / parse config file |
384,342 | def validate(self, model, checks=[]):
records = self.data.to_dict("records")
self.evaluate_report(
validate(records, headers=list(records[0]),
preset='table', schema=self.schema,
order_fields=True, custom_checks=checks)) | Use a defined schema to validate the given table. |
384,343 | def auprc(y_true, y_pred):
y_true, y_pred = _mask_value_nan(y_true, y_pred)
return skm.average_precision_score(y_true, y_pred) | Area under the precision-recall curve |
384,344 | def close(self):
if self.error_log and not self.quiet:
print("\nErrors occured:", file=sys.stderr)
for err in self.error_log:
print(err, file=sys.stderr)
self._session.close() | Print error log and close session |
384,345 | def has_listener(self, evt_name, fn):
listeners = self.__get_listeners(evt_name)
return fn in listeners | Check whether the given listener exists.
:params evt_name: event name
:params fn: the trigger function to be registered |
384,346 | def execute(opts, data, func, args, kwargs):
cmd = ['sudo',
'-u', opts.get('sudo_user'),
'salt-call',
'--out', 'json',
'--metadata',
'-c', opts.get('config_dir'),
'--',
data.get('fun')]
if data['fun'] in ('state.sls', 'state.highstate', 'state.apply'):
kwargs['concurrent'] = True
for arg in args:
cmd.append(_cmd_quote(six.text_type(arg)))
for key in kwargs:
cmd.append(_cmd_quote('{0}={1}'.format(key, kwargs[key])))
cmd_ret = __salt__['cmd.run_all'](cmd, use_vt=True, python_shell=False)
if cmd_ret['retcode'] == 0:
cmd_meta = salt.utils.json.loads(cmd_ret['stdout'])['local']
ret = cmd_meta['return']
__context__['retcode'] = cmd_meta.get('retcode', 0)
else:
ret = cmd_ret['stderr']
__context__['retcode'] = cmd_ret['retcode']
return ret | Allow for the calling of execution modules via sudo.
This module is invoked by the minion if the ``sudo_user`` minion config is
present.
Example minion config:
.. code-block:: yaml
sudo_user: saltdev
Once this setting is made, any execution module call done by the minion will be
run under ``sudo -u <sudo_user> salt-call``. For example, with the above
minion config,
.. code-block:: bash
salt sudo_minion cmd.run 'cat /etc/sudoers'
is equivalent to
.. code-block:: bash
sudo -u saltdev salt-call cmd.run 'cat /etc/sudoers'
being run on ``sudo_minion``. |
384,347 | def _default_hashfunc(content, hashbits):
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x | Default hash function is variable-length version of Python's builtin hash.
:param content: data that needs to hash.
:return: return a decimal number. |
384,348 | def resolve_outputs(self):
input_shape = None
for i, shape in enumerate(self._input_shapes.values()):
if i == 0:
input_shape = shape
if len(input_shape) != len(shape) or any(
a is not None and b is not None and a != b
for a, b in zip(input_shape[:-1], shape[:-1])):
raise util.ConfigurationError(
.format(self.name, self._input_shapes))
size = self.kwargs.get('size')
shape = self.kwargs.get('shape')
if shape is not None:
pass
elif size is not None:
shape = tuple(input_shape[:-1]) + (size, )
else:
raise util.ConfigurationError(
.format(self.name))
self._output_shapes['out'] = shape | Resolve the names of outputs for this layer into shape tuples. |
384,349 | def _to_dict(self):
_dict = {}
if hasattr(self, 'document') and self.document is not None:
_dict['document'] = self.document._to_dict()
if hasattr(self, 'model_id') and self.model_id is not None:
_dict['model_id'] = self.model_id
if hasattr(self, 'model_version') and self.model_version is not None:
_dict['model_version'] = self.model_version
if hasattr(self, 'tables') and self.tables is not None:
_dict['tables'] = [x._to_dict() for x in self.tables]
return _dict | Return a json dictionary representing this model. |
384,350 | def get_script(self):
uri = "{}/script".format(self.data["uri"])
return self._helper.do_get(uri) | Gets the configuration script of the logical enclosure by ID or URI.
Return:
str: Configuration script. |
384,351 | def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
with tmp_file() as (_, tmp):
with open(tmp, "w") as f:
f.write(passwd)
cmd = "cat | voms-proxy-init --valid ".format(tmp, lifetime)
if vo:
cmd += " -voms ".format(vo)
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("proxy renewal failed: {}".format(out)) | Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and
a default *lifetime* of 8 days. The password is written to a temporary file first and piped into
the renewal commad to ensure it is not visible in the process list. |
384,352 | def __parse_organizations(self, stream):
for aliases in self.__parse_stream(stream):
identity = self.__parse_alias(aliases[1])
uuid = identity.email
uid = self._identities.get(uuid, None)
if not uid:
uid = UniqueIdentity(uuid=uuid)
identity.uuid = uuid
uid.identities.append(identity)
self._identities[uuid] = uid
mailmap_id = aliases[0]
name = self.__encode(mailmap_id[0])
if name in MAILMAP_NO_ORGS:
continue
org = Organization(name=name)
self._organizations[name] = org
enrollment = Enrollment(start=MIN_PERIOD_DATE, end=MAX_PERIOD_DATE,
organization=org)
uid.enrollments.append(enrollment) | Parse organizations stream |
384,353 | def get_nameid_data(self):
nameid = None
nameid_data = {}
encrypted_id_data_nodes = self.__query_assertion()
if encrypted_id_data_nodes:
encrypted_data = encrypted_id_data_nodes[0]
key = self.__settings.get_sp_key()
nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
else:
nameid_nodes = self.__query_assertion()
if nameid_nodes:
nameid = nameid_nodes[0]
is_strict = self.__settings.is_strict()
want_nameid = self.__settings.get_security_data().get('wantNameId', True)
if nameid is None:
if is_strict and want_nameid:
raise OneLogin_Saml2_ValidationError(
,
OneLogin_Saml2_ValidationError.NO_NAMEID
)
else:
if is_strict and want_nameid and not OneLogin_Saml2_Utils.element_text(nameid):
raise OneLogin_Saml2_ValidationError(
,
OneLogin_Saml2_ValidationError.EMPTY_NAMEID
)
nameid_data = {'Value': OneLogin_Saml2_Utils.element_text(nameid)}
for attr in ['Format', 'SPNameQualifier', 'NameQualifier']:
value = nameid.get(attr, None)
if value:
if is_strict and attr == 'SPNameQualifier':
sp_data = self.__settings.get_sp_data()
sp_entity_id = sp_data.get('entityId', '')
if sp_entity_id != value:
raise OneLogin_Saml2_ValidationError(
,
OneLogin_Saml2_ValidationError.SP_NAME_QUALIFIER_NAME_MISMATCH
)
nameid_data[attr] = value
return nameid_data | Gets the NameID Data provided by the SAML Response from the IdP
:returns: Name ID Data (Value, Format, NameQualifier, SPNameQualifier)
:rtype: dict |
384,354 | def get_section(value):
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
section.defects.append(errors.InvalidHeaderError("section number"
"has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value | '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS. |
384,355 | def create_index(self, index, index_type=GEO2D):
self.logger.info("Adding %s index to stores on attribute: %s" % (index_type, index))
yield self.collection.create_index([(index, index_type)]) | Create an index on a given attribute
:param str index: Attribute to set index on
:param str index_type: See PyMongo index types for further information, defaults to GEO2D index. |
384,356 | def update_message(self, message_id, category_id, title, body,
extended_body, use_textile=False, private=False, notify=None):
path = % message_id
req = ET.Element()
req.append(self._create_message_post_elem(category_id, title, body,
extended_body, use_textile=False, private=False))
if notify is not None:
for person_id in notify:
ET.SubElement(req, 'notify').text = str(int(person_id))
return self._request(path, req) | Updates an existing message, optionally sending notifications to a
selected list of people. Note that you can also upload files using
this function, but you have to format the request as
multipart/form-data. (See the ruby Basecamp API wrapper for an example
of how to do this.) |
384,357 | def close(self, proto):
try:
proto.sendClose()
except Exception as ex:
logger.exception("Failed to send close")
proto.reraise(ex) | Closes a connection |
384,358 | def handler(*names, **kwargs):
def wrapper(f):
if names and isinstance(names[0], bool) and not names[0]:
f.handler = False
return f
if len(names) > 0 and inspect.isclass(names[0]) and \
issubclass(names[0], hfosEvent):
f.names = (str(names[0].realname()),)
else:
f.names = names
f.handler = True
f.priority = kwargs.get("priority", 0)
f.channel = kwargs.get("channel", None)
f.override = kwargs.get("override", False)
args = inspect.getargspec(f)[0]
if args and args[0] == "self":
del args[0]
f.event = getattr(f, "event", bool(args and args[0] == "event"))
return f
return wrapper | Creates an Event Handler
This decorator can be applied to methods of classes derived from
:class:`circuits.core.components.BaseComponent`. It marks the method as a
handler for the events passed as arguments to the ``@handler`` decorator.
The events are specified by their name.
The decorated method's arguments must match the arguments passed to the
:class:`circuits.core.events.Event` on creation. Optionally, the
method may have an additional first argument named *event*. If declared,
the event object that caused the handler to be invoked is assigned to it.
By default, the handler is invoked by the component's root
:class:`~.manager.Manager` for events that are propagated on the channel
determined by the BaseComponent's *channel* attribute.
This may be overridden by specifying a different channel as a keyword
parameter of the decorator (``channel=...``).
Keyword argument ``priority`` influences the order in which handlers
for a specific event are invoked. The higher the priority, the earlier
the handler is executed.
If you want to override a handler defined in a base class of your
component, you must specify ``override=True``, else your method becomes
an additional handler for the event.
**Return value**
Normally, the results returned by the handlers for an event are simply
collected in the :class:`circuits.core.events.Event`'s :attr:`value`
attribute. As a special case, a handler may return a
:class:`types.GeneratorType`. This signals to the dispatcher that the
handler isn't ready to deliver a result yet.
Rather, it has interrupted it's execution with a ``yield None``
statement, thus preserving its current execution state.
The dispatcher saves the returned generator object as a task.
All tasks are reexamined (i.e. their :meth:`next()` method is invoked)
when the pending events have been executed.
This feature avoids an unnecessarily complicated chaining of event
handlers. Imagine a handler A that needs the results from firing an
event E in order to complete. Then without this feature, the final
action of A would be to fire event E, and another handler for
an event ``SuccessE`` would be required to complete handler A's
operation, now having the result from invoking E available
(actually it's even a bit more complicated).
Using this "suspend" feature, the handler simply fires event E and
then yields ``None`` until e.g. it finds a result in E's :attr:`value`
attribute. For the simplest scenario, there even is a utility
method :meth:`circuits.core.manager.Manager.callEvent` that combines
firing and waiting. |
384,359 | def sample_discrete(self, state=None, n_steps=100, random_state=None):
random = check_random_state(random_state)
r = random.rand(1 + n_steps)
if state is None:
initial = np.sum(np.cumsum(self.populations_) < r[0])
elif hasattr(state, '__len__') and len(state) == self.n_states_:
initial = np.sum(np.cumsum(state) < r[0])
else:
initial = self.mapping_[state]
cstr = np.cumsum(self.transmat_, axis=1)
chain = [initial]
for i in range(1, n_steps):
chain.append(np.sum(cstr[chain[i - 1], :] < r[i]))
return self.inverse_transform([chain])[0] | r"""Generate a random sequence of states by propagating the model
using discrete time steps given by the model lagtime.
Parameters
----------
state : {None, ndarray, label}
Specify the starting state for the chain.
``None``
Choose the initial state by randomly drawing from the model's
stationary distribution.
``array-like``
If ``state`` is a 1D array with length equal to ``n_states_``,
then it is is interpreted as an initial multinomial
distribution from which to draw the chain's initial state.
Note that the indexing semantics of this array must match the
_internal_ indexing of this model.
otherwise
Otherwise, ``state`` is interpreted as a particular
deterministic state label from which to begin the trajectory.
n_steps : int
Lengths of the resulting trajectory
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Returns
-------
sequence : array of length n_steps
A randomly sampled label sequence |
384,360 | def sort(self, column_or_label, descending=False, distinct=False):
column = self._get_column(column_or_label)
if distinct:
_, row_numbers = np.unique(column, return_index=True)
else:
row_numbers = np.argsort(column, axis=0, kind='mergesort')
assert (row_numbers < self.num_rows).all(), row_numbers
if descending:
row_numbers = np.array(row_numbers[::-1])
return self.take(row_numbers) | Return a Table of rows sorted according to the values in a column.
Args:
``column_or_label``: the column whose values are used for sorting.
``descending``: if True, sorting will be in descending, rather than
ascending order.
``distinct``: if True, repeated values in ``column_or_label`` will
be omitted.
Returns:
An instance of ``Table`` containing rows sorted based on the values
in ``column_or_label``.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.sort("Amount")
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Blue | Rectangular | 12 | 2
>>> marbles.sort("Amount", descending = True)
Color | Shape | Amount | Price
Blue | Rectangular | 12 | 2
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Green | Rectangular | 6 | 1.3
Red | Round | 4 | 1.3
Green | Round | 2 | 1
>>> marbles.sort(3) # the Price column
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2
>>> marbles.sort(3, distinct = True)
Color | Shape | Amount | Price
Green | Round | 2 | 1
Red | Round | 4 | 1.3
Green | Rectangular | 9 | 1.4
Red | Round | 7 | 1.75
Blue | Rectangular | 12 | 2 |
384,361 | def init_state(self):
self.in_warc_response = False
self.in_http_response = False
self.in_payload = False | Sets the initial state of the state machine. |
384,362 | def require_single_root_target(self):
target_roots = self.context.target_roots
if len(target_roots) == 0:
raise TaskError()
elif len(target_roots) > 1:
raise TaskError(
.format(.join([repr(t) for t in target_roots])))
return target_roots[0] | If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
:API: public |
384,363 | def setConfigurable(self, state):
self._configurable = state
self._configButton.setVisible(state) | Sets whether or not this logger widget is configurable.
:param state | <bool> |
384,364 | def invoked(self, ctx):
print("{} + {} = {}".format(
ctx.args.x,
ctx.args.y,
ctx.args.x + ctx.args.y)) | Guacamole method used by the command ingredient.
:param ctx:
The guacamole context object. Context provides access to all
features of guacamole. The argparse ingredient adds the ``args``
attribute to it. That attribute contains the result of parsing
command line arguments.
:returns:
The return code of the command. Guacamole translates ``None`` to a
successful exit status (return code zero). |
384,365 | def import_name(mod_name):
try:
mod_obj_old = sys.modules[mod_name]
except KeyError:
mod_obj_old = None
if mod_obj_old is not None:
return mod_obj_old
__import__(mod_name)
mod_obj = sys.modules[mod_name]
return mod_obj | Import a module by module name.
@param mod_name: module name. |
384,366 | def _on_hid_pnp(self, w_param, l_param):
"Process WM_DEVICECHANGE system messages"
new_status = "unknown"
if w_param == DBT_DEVICEARRIVAL:
notify_obj = None
if int(l_param):
notify_obj = DevBroadcastDevInterface.from_address(l_param)
if notify_obj and \
notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE:
new_status = "disconnected"
if new_status != "unknown" and new_status != self.current_status:
self.current_status = new_status
self.on_hid_pnp(self.current_status)
return True | Process WM_DEVICECHANGE system messages |
384,367 | def _is_image_sequenced(image):
try:
image.seek(1)
image.seek(0)
result = True
except EOFError:
result = False
return result | Determine if the image is a sequenced image. |
384,368 | def get_valid_error(x1, x2=-1):
if type(x2) == int and x2 == -1:
try:
e = np.array(x1)
except:
raise ValueError()
else:
try:
x1 = np.array(x1)
x2 = np.array(x2)
except:
raise ValueError()
if not len(x1) == len(x2):
raise ValueError()
e = x1 - x2
return e | Function that validates:
* x1 is possible to convert to numpy array
* x2 is possible to convert to numpy array (if exists)
* x1 and x2 have the same length (if both exist) |
384,369 | def handle_setting_changed(sender, setting, value, enter, **kwargs):
if setting == 'AXES_HANDLER':
AxesProxyHandler.get_implementation(force=True) | Reinitialize handler implementation if a relevant setting changes
in e.g. application reconfiguration or during testing. |
384,370 | def rgb2termhex(r: int, g: int, b: int) -> str:
incs = [0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]
res = []
parts = r, g, b
for part in parts:
if (part < 0) or (part > 255):
raise ValueError(
.format(parts)
)
i = 0
while i < len(incs) - 1:
s, b = incs[i], incs[i + 1]
if s <= part <= b:
s1 = abs(s - part)
b1 = abs(b - part)
if s1 < b1:
closest = s
else:
closest = b
res.append(closest)
break
i += 1
return rgb2hex(*res) | Convert an rgb value to the nearest hex value that matches a term code.
The hex value will be one in `hex2term_map`. |
384,371 | def _g(self, z):
return np.exp(np.multiply(-self.theta, z)) - 1 | Helper function to solve Frank copula.
This functions encapsulates :math:`g_z = e^{-\\theta z} - 1` used on Frank copulas.
Argument:
z: np.ndarray
Returns:
np.ndarray |
384,372 | def certclone(chain, copy_extensions=False):
for i in range(len(chain)):
chain[i] = chain[i].to_cryptography()
newchain = []
first = True
for original in chain[::-1]:
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
).decode()
if first:
print(key_pem)
first=False
pubkey = key.public_key()
cert = x509.CertificateBuilder()
cert = cert.subject_name(original.subject)
cert = cert.issuer_name(original.issuer)
cert = cert.serial_number(x509.random_serial_number())
cert = cert.not_valid_before(original.not_valid_before)
cert = cert.not_valid_after(original.not_valid_after)
cert = cert.public_key(pubkey)
if copy_extensions:
for ext in original.extensions:
cert = cert.add_extension(ext.value, critical=ext.critical)
cert = cert.sign(private_key=key, algorithm=original.signature_hash_algorithm, backend=default_backend())
cert_pem = cert.public_bytes(serialization.Encoding.PEM).decode()
print(cert_pem)
newchain.insert(0, cert) | key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
pubkey = key.public_key() |
384,373 | def __period_remaining(self):
elapsed = self.clock() - self.last_reset
return self.period - elapsed | Return the period remaining for the current rate limit window.
:return: The remaining period.
:rtype: float |
384,374 | def parse_band_log(self, message):
if "payload" in message and hasattr(message["payload"], "name"):
record = message["payload"]
for k in dir(record):
if k.startswith("workflows_exc_"):
setattr(record, k[14:], getattr(record, k))
delattr(record, k)
for k, v in self.get_status().items():
setattr(record, "workflows_" + k, v)
logging.getLogger(record.name).handle(record)
else:
self.log.warning(
"Received broken record on log band\n" + "Message: %s\nRecord: %s",
str(message),
str(
hasattr(message.get("payload"), "__dict__")
and message["payload"].__dict__
),
) | Process incoming logging messages from the service. |
384,375 | def base_dict_to_string(base_dict):
outstr = ''
base_list = sorted(base_dict.items(), key=lambda kv: kv[1], reverse=True)
for base in base_list:
outstr += '{}:{};'.format(base[0], base[1])
return outstr[:-1] | Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4
:param base_dict: Dictionary of bases and counts created by find_if_multibase
:return: String representing that dictionary. |
384,376 | def close(self, **kw):
if self._closing_deferred:
d = defer.Deferred()
def closed(arg):
d.callback(arg)
return arg
self._closing_deferred.addBoth(closed)
return d
self._closing_deferred = defer.Deferred()
def close_command_is_queued(*args):
return self._closing_deferred
d = self._torstate.close_circuit(self.id, **kw)
d.addCallback(close_command_is_queued)
return d | This asks Tor to close the underlying circuit object. See
:meth:`txtorcon.torstate.TorState.close_circuit`
for details.
You may pass keyword arguments to take care of any Flags Tor
accepts for the CLOSECIRCUIT command. Currently, this is only
"IfUnused". So for example: circ.close(IfUnused=True)
:return: Deferred which callbacks with this Circuit instance
ONLY after Tor has confirmed it is gone (not simply that the
CLOSECIRCUIT command has been queued). This could be a while
if you included IfUnused. |
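A hedged txtorcon-style sketch: close a circuit only if Tor considers it unused (the circuit object is assumed to come from a TorState elsewhere):

from twisted.internet import defer

@defer.inlineCallbacks
def close_if_idle(circuit):
    yield circuit.close(IfUnused=True)    # fires only after Tor confirms the circuit is gone
    print('circuit', circuit.id, 'closed')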
384,377 | def get_renderers(self):
try:
source = self.get_object()
except (ImproperlyConfigured, APIException):
self.renderer_classes = [RENDERER_MAPPING[i] for i in self.__class__.renderers]
return [RENDERER_MAPPING[i]() for i in self.__class__.renderers]
else:
self.renderer_classes = [RENDERER_MAPPING[i] for i in source.__class__.renderers]
return [RENDERER_MAPPING[i]() for i in source.__class__.renderers] | Instantiates and returns the list of renderers that this view can use. |
384,378 | def wash_urlargd(form, content):
result = {}
for k, (dst_type, default) in content.items():
try:
value = form[k]
except KeyError:
result[k] = default
continue
src_type = type(value)
if src_type in (list, tuple):
if dst_type is list:
result[k] = [x for x in value]
continue
if dst_type is tuple:
result[k] = tuple([x for x in value])
continue
value = value[0]
if isinstance(dst_type, types.FunctionType):
result[k] = dst_type(value)
continue
if isinstance(value, dst_type):
result[k] = value
continue
try:
# fallback cast is a reconstruction; the original branch was truncated here
result[k] = dst_type(value)
except (TypeError, ValueError):
raise ValueError('cannot cast form value %s of type %r into type %r' % (
value, src_type, dst_type))
return result | Wash the complete form based on the specification in content.
Content is a dictionary containing the field names as a
key, and a tuple (type, default) as value.
'type' can be list, unicode, legacy.wsgi.utils.StringField, int,
tuple, or legacy.wsgi.utils.Field (for file uploads).
The specification automatically includes the 'ln' field, which is
common to all queries.
Arguments that are not defined in 'content' are discarded.
.. note::
In case `list` or `tuple` were asked for, we assume that
`list` or `tuple` of strings is to be returned. Therefore beware when
you want to use ``wash_urlargd()`` for multiple file upload forms.
:returns: argd dictionary that can be used for passing function
parameters by keywords. |
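An illustrative content specification and form, assuming the cast-and-raise fallback reconstructed above (the field names are made up):

content = {
    'recid': (int, -1),
    'tags': (list, []),
    'query': (str, ''),
}
form = {'recid': ['42'], 'tags': ['a', 'b']}
argd = wash_urlargd(form, content)
# argd == {'recid': 42, 'tags': ['a', 'b'], 'query': ''}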
384,379 | def _input_as_lines(self, data):
if data:
self.Parameters['-i']\
.on(super(CD_HIT, self)._input_as_lines(data))
return | Writes data to tempfile and sets -i parameter
data -- list of lines, ready to be written to file |
384,380 | def start_at(self, document_fields):
query = query_mod.Query(self)
return query.start_at(document_fields) | Start query at a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. |
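A hypothetical google-cloud-firestore call; the collection and field names are made up, and the usual pattern pairs the cursor with an order_by on the same field:

from google.cloud import firestore

db = firestore.Client()
cities = db.collection('cities')
query = cities.order_by('population').start_at({'population': 1000000})
for snapshot in query.stream():
    print(snapshot.id, snapshot.to_dict())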
384,381 | def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):
if retained_chroms is None:
retained_chroms = []
work_dir = os.getcwd()
# note: the quoted literals below (samtools arguments, header tags, temp-file names) were missing in the source and are reconstructed; the temp-file suffixes are assumptions
input_files = {
sample_type + '.bam': bamfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['view',
'-H',
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=headerfile,
tool_version=samtools_options['version'])
with open(headerfile.name, 'r') as headerfile, \
open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:
for line in headerfile:
if line.startswith('@PG'):
line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
if retained_chroms and line.startswith('@SQ'):
if line.strip().split()[1].lstrip('SN:') not in retained_chroms:
continue
print(line.strip(), file=outheaderfile)
parameters = ['reheader',
docker_path(outheaderfile.name),
input_files[sample_type + '.bam']]
with open('/'.join([work_dir, sample_type + '_fixpg.bam']), 'w') as fixpg_bamfile:
docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,
tool_version=samtools_options['version'])
output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
job.fileStore.deleteGlobalFile(bamfile)
job.fileStore.logToMaster('Ran samtools reheader on %s:%s successfully'
% (univ_options['patient'], sample_type))
return output_file | Fix the bam header to remove the command line call. Failing to do this causes Picard to reject
the bam.
:param dict bamfile: The input bam file
:param str sample_type: Description of the sample to inject into the filename
:param dict univ_options: Dict of universal options used by almost all tools
:param dict samtools_options: Options specific to samtools
:param list retained_chroms: A list of chromosomes to retain
:return: fsID for the output bam
:rtype: toil.fileStore.FileID |
384,382 | def generate_move(self, position):
while True:
print(position)
raw = input(str(self.color) + "\'s move \n")
move = converter.short_alg(raw, self.color, position)
if move is None:
continue
return move | Returns valid and legal move given position
:type position: Board
:rtype: Move |
384,383 | def get_assessments_offered(self):
if self.retrieved:
raise errors.IllegalState()
self.retrieved = True
return objects.AssessmentOfferedList(self._results, runtime=self._runtime) | Gets the assessment offered list resulting from the search.
return: (osid.assessment.AssessmentOfferedList) - the assessment
offered list
raise: IllegalState - the assessment offered list has already
been retrieved
*compliance: mandatory -- This method must be implemented.* |
384,384 | def set_sim_data(inj, field, data):
try:
sim_field = sim_inspiral_map[field]
except KeyError:
sim_field = field
if sim_field == 'geocent_end_time':  # attribute name assumed; splits the float end time into seconds + nanoseconds
inj.geocent_end_time = int(data)
inj.geocent_end_time_ns = int(1e9*(data % 1))
else:
setattr(inj, sim_field, data) | Sets data of a SimInspiral instance. |
384,385 | def files(self, *args, **kwargs):
return [p for p in self.listdir(*args, **kwargs) if p.isfile()] | D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`. |
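Typical path.py-style usage, assuming a Path class that exposes this method:

from path import Path

for f in Path('.').files('*.py'):
    print(f.name)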
384,386 | def select(self, ids, do_emit=True, **kwargs):
self.eval_js('table.select({});'.format(dumps(ids)))
if do_emit:
self.emit('select', ids, **kwargs) | Select some rows in the table.
By default, the `select` event is raised, unless `do_emit=False`. |
384,387 | def configure_model(self, attrs, field_name):
self.relationship = field_name
self._set_method_names(relationship=field_name)
if self.res_name is None:
self.res_name = grammar.singularize(attrs.get(, ).strip()) | Hook for ResourceMeta class to call when initializing model class.
Saves fields obtained from resource class backlinks |
384,388 | def Cp_material(ID, T=298.15):
if ID not in materials_dict:
ID = nearest_material(ID)
if ID in refractories:
Cp = refractory_VDI_Cp(ID, T)
elif ID in building_materials:
Cp = float(building_materials[ID][2])
else:
Cp = ASHRAE[ID][1]
if Cp is None:
raise Exception()
else:
Cp = float(Cp)
return Cp | r'''Returns heat capacity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Heat capacity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
Cp : float
Heat capacity of the material, [J/kg/K]
Examples
--------
>>> Cp_material('Mineral fiber')
840.0
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010. |
384,389 | def h(tagName, *children, **kwargs):
attrs = {}
if 'attrs' in kwargs:
attrs = kwargs.pop('attrs')
attrs = attrs.copy()
attrs.update(kwargs)
el = createComponent(tagName)
return el(children, **attrs) | Takes an HTML Tag, children (string, array, or another element), and
attributes
Examples:
>>> h('div', [h('p', 'hey')])
<div><p>hey</p></div> |
384,390 | async def start(self):
_LOGGER.debug('Starting API, version %s', __version__)  # log message text assumed
await self.fetch_token()
if self._token is not None:
await self.fetch_device_list()
await self.assign_users()
return True
else:
return False | Start api initialization. |
384,391 | def adjoint(self):
if self.variant == 'point_eval':
variant = 'dirac'
elif self.variant == 'integrate':
variant = 'char_fun'
else:
raise RuntimeError('unsupported variant {!r}'.format(self.variant))
return WeightedSumSamplingOperator(self.domain, self.sampling_points,
variant) | Adjoint of the sampling operator, a `WeightedSumSamplingOperator`.
If each sampling point occurs only once, the adjoint consists
in inserting the given values into the output at the sampling
points. Duplicate sampling points are weighted with their
multiplicity.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.SamplingOperator(space, sampling_points)
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
The ``'integrate'`` variant adjoint puts ones at the indices in
``sampling_points``, multiplied by their multiplicity:
>>> op = odl.SamplingOperator(space, sampling_points,
... variant='integrate')
>>> op.adjoint(op.range.one()) # (0, 0) occurs twice
uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element(
[[ 2., 0., 0.],
[ 0., 1., 1.]]
)
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True |
384,392 | def SetTimelineOwner(self, username):
self._timeline_owner = username
logger.info('Owner of the timeline: {0:s}'.format(self._timeline_owner)) | Sets the username of the user that should own the timeline.
Args:
username (str): username. |
384,393 | def tube_hires(script, height=1.0, radius=None, radius1=None, radius2=None,
diameter=None, diameter1=None, diameter2=None, cir_segments=32,
rad_segments=1, height_segments=1, center=False,
simple_bottom=False, color=None):
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1
if radius2 is None:
radius2 = 0
annulus_hires(script,
radius1=radius1,
radius2=radius2,
cir_segments=cir_segments,
rad_segments=rad_segments)
transform.translate(script, [0, 0, height])
if simple_bottom:
annulus(script,
radius1=radius1,
radius2=radius2,
cir_segments=cir_segments)
else:
layers.duplicate(script)
transform.translate(script, [0, 0, -height])
transform.rotate(script, 'x', 180)  # rotation axis assumed
cylinder_open_hires(script, height, radius1,
cir_segments=cir_segments,
height_segments=height_segments)
if radius2 != 0:
cylinder_open_hires(script, height, radius2,
cir_segments=cir_segments,
height_segments=height_segments,
invert_normals=True)
layers.join(script)
clean.merge_vert(script, threshold=0.00002)
if center:
transform.translate(script, [0, 0, -height / 2])
if color is not None:
vert_color.function(script, color=color)
return None | Create a cylinder with user defined number of segments |
384,394 | def addcol(msname, colname=None, shape=None,
data_desc_type='array', valuetype=None, init_with=0, **kw):
import numpy
import pyrap.tables
tab = pyrap.tables.table(msname, readonly=False)
try:
tab.getcol(colname)
print('Column already exists')
except RuntimeError:
print('Attempting to add %s column to %s' % (colname, msname))
from pyrap.tables import maketabdesc
valuetype = valuetype or 'complex'
if shape is None:
dshape = list(tab.getcol('DATA').shape)
shape = dshape[1:]
if data_desc_type == 'array':
from pyrap.tables import makearrcoldesc
coldmi = tab.getdminfo('DATA')
# the column-creation call was missing in the source; reconstructed using the DATA column's data manager
coldmi['NAME'] = colname.lower()
tab.addcols(maketabdesc(makearrcoldesc(colname, init_with, shape=shape, valuetype=valuetype)), coldmi)
if init_with:
nrows = dshape[0]
rowchunk = nrows//10 if nrows > 1000 else nrows
for row0 in range(0,nrows,rowchunk):
nr = min(rowchunk,nrows-row0)
dshape[0] = nr
tab.putcol(colname,numpy.ones(dshape,dtype=valuetype)*init_with,row0,nr)
tab.close() | add column to MS
msname : MS to add column to
colname : column name
shape : shape
valuetype : data type
data_desc_type : 'scalar' for scalar elements and 'array' for array elements
init_with : value to initialise the column with |
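A hypothetical call against a Measurement Set, relying on the reconstructed defaults above; the MS path and column names are illustrative:

addcol('observation.ms', colname='CORRECTED_DATA', valuetype='complex', init_with=0)
addcol('observation.ms', colname='MODEL_DATA', valuetype='complex', init_with=1)   # initialised to ones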
384,395 | def determine_deaths(self, event: Event):
effective_rate = self.mortality_rate(event.index)
effective_probability = 1 - np.exp(-effective_rate)
draw = self.randomness.get_draw(event.index)
affected_simulants = draw < effective_probability
self.population_view.update(pd.Series('dead', index=event.index[affected_simulants])) | Determines who dies each time step.
Parameters
----------
event :
An event object emitted by the simulation containing an index
representing the simulants affected by the event and timing
information. |
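The rate-to-probability conversion used above, checked numerically in isolation:

import numpy as np

rate = np.array([0.0, 0.01, 0.1])   # per-time-step mortality rates
prob = 1 - np.exp(-rate)            # probability of dying during the step
print(prob.round(5))                # [0.      0.00995 0.09516]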
384,396 | def load(self, ):
assert self.status() == self.UNLOADED,\
"Cannot load if there is no unloaded reference. Use reference instead."
self.get_refobjinter().load(self._refobj)
self.set_status(self.LOADED)
self.fetch_new_children()
self.update_restrictions()
self.emit_data_changed() | If the reference is in the scene but unloaded, load it.
.. Note:: Do not confuse this with reference or import. Load means that it is already referenced.
But the data from the reference was not read until now. Load loads the data from the reference.
This will call :meth:`RefobjInterface.load` and set the status to :data:`Reftrack.LOADED`.
:returns: None
:rtype: None
:raises: :class:`ReftrackIntegrityError` |
384,397 | def serialize(self, private=False):
if self.priv_key:
self._serialize(self.priv_key)
else:
self._serialize(self.pub_key)
res = self.common()
res.update({
"crv": self.crv,
"x": self.x,
"y": self.y
})
if private and self.d:
res["d"] = self.d
return res | Go from a
cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
or EllipticCurvePublicKey instance to a JWK representation.
:param private: Whether we should include the private attributes or not.
:return: A JWK as a dictionary |
384,398 | def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect the MIME type; consider installing python-magic or passing the 'contentType' parameter explicitly.")
return None | Get the MIME type for a given stream of bytes
:param buff: Stream of bytes
:type buff: bytes
:rtype: str |
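The imghdr/mimetypes fallback branch in isolation -- guessing a MIME type from raw bytes with only the standard library:

import imghdr
import mimetypes

def guess_mime(buff):
    kind = imghdr.what(None, h=buff)   # e.g. 'png', 'jpeg', or None
    return mimetypes.guess_type('f.' + kind)[0] if kind else None

print(guess_mime(b'\x89PNG\r\n\x1a\n' + b'\x00' * 16))   # image/png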
384,399 | def gaussian_distribution(mean, stdev, num_pts=50):
warnings.warn("pyemu.helpers.gaussian_distribution() has moved to plot_utils",PyemuWarning)
from pyemu import plot_utils
return plot_utils.gaussian_distribution(mean=mean,stdev=stdev,num_pts=num_pts) | get an x and y numpy.ndarray that spans the +/- 4
standard deviation range of a gaussian distribution with
a given mean and standard deviation. useful for plotting
Parameters
----------
mean : float
the mean of the distribution
stdev : float
the standard deviation of the distribution
num_pts : int
the number of points in the returned ndarrays.
Default is 50
Returns
-------
x : numpy.ndarray
the x-values of the distribution
y : numpy.ndarray
the y-values of the distribution |
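Illustrative use of the relocated helper (requires pyemu and matplotlib; the values are arbitrary):

import matplotlib.pyplot as plt
from pyemu import plot_utils

x, y = plot_utils.gaussian_distribution(mean=10.0, stdev=2.0, num_pts=100)
plt.plot(x, y)
plt.show()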