Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
375,000 | def delPlayer(name):
player = getPlayer(name)
try: os.remove(player.filename)
except IOError: pass
try: del getKnownPlayers()[player.name]
except: pass
return player | forget about a previously defined PlayerRecord setting by deleting its disk file |
375,001 | def remove_users_from_user_group(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.remove_users_from_user_group_with_http_info(id, **kwargs)
else:
(data) = self.remove_users_from_user_group_with_http_info(id, **kwargs)
return data | Remove multiple users from a specific user group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_users_from_user_group(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body: List of users that should be removed from user group
:return: ResponseContainerUserGroup
If the method is called asynchronously,
returns the request thread. |
375,002 | def _delay_call(self):
now = time.time()
time_since_last = now - self.last_call_time
if time_since_last < DELAY_TIME:
time.sleep(DELAY_TIME - time_since_last)
self.last_call_time = now | Makes sure that web service calls are at least 0.2 seconds apart. |
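A small usage sketch of this throttle pattern; the ApiClient wrapper, the fetch method, and the DELAY_TIME value of 0.2 s are illustrative assumptions, not part of the row above:

    import time

    DELAY_TIME = 0.2  # assumed value, matching the 0.2 s in the docstring

    class ApiClient:
        def __init__(self):
            self.last_call_time = 0.0

        def _delay_call(self):
            # sleep just long enough that calls are at least DELAY_TIME apart
            now = time.time()
            time_since_last = now - self.last_call_time
            if time_since_last < DELAY_TIME:
                time.sleep(DELAY_TIME - time_since_last)
            self.last_call_time = time.time()

        def fetch(self, resource):
            self._delay_call()  # every public call funnels through the throttle
            return "response for %s" % resource

    client = ApiClient()
    for r in ("a", "b", "c"):
        client.fetch(r)  # consecutive calls are spaced at least 0.2 s apart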
375,003 | def reraise(tpe, value, tb=None):
" Reraise an exception from an exception info tuple. "
Py3 = (sys.version_info[0] == 3)
if value is None:
value = tpe()
if Py3:
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
exec('raise tpe, value, tb')
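A hedged usage sketch: reraise is normally fed the tuple from sys.exc_info(), so an exception caught in one place can be re-thrown later with its original traceback:

    import sys

    exc_info = None
    try:
        1 / 0
    except ZeroDivisionError:
        exc_info = sys.exc_info()  # (type, value, traceback)

    # ... later, possibly after cleanup work ...
    if exc_info is not None:
        reraise(*exc_info)  # raises ZeroDivisionError with the original traceback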
375,004 | def mark_running(self):
with self._lock:
self._set_state(self._RUNNING, self._PAUSED) | Moves the service to the Running state.
Raises if the service is not currently in the Paused state. |
375,005 | def start_timer(self, duration, func, *args):
t = threading.Timer(duration, self._timer_callback, (func, args))
self._timer_callbacks[func] = t
t.start()
self.log.info("Scheduled call to %s in %ds", func.__name__, duration) | Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function |
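For context, a minimal sketch of the threading.Timer primitive that start_timer wraps; the greet callback is a made-up example:

    import threading

    def greet(name):
        print("hello,", name)

    # Timer runs the callable once, on its own thread, after the delay.
    t = threading.Timer(2.0, greet, ("world",))
    t.start()
    # t.cancel() would abort the call if it has not fired yet.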
375,006 | def reset(name):
ret = {}
client = salt.client.get_local_client(__opts__['conf_file'])
data = vm_info(name, quiet=True)
if not data:
__jid_event__.fire_event({'error': 'Failed to find VM {0} to reset'.format(name)}, 'progress')
return
host = next(six.iterkeys(data))
try:
cmd_ret = client.cmd_iter(
host,
'virt.reset',
[name],
timeout=600)
for comp in cmd_ret:
ret.update(comp)
__jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
except SaltClientError as client_error:
print(client_error)
return ret | Force power down and restart an existing VM |
375,007 | def hardware_info(self, mask=0xFFFFFFFF):
buf = (ctypes.c_uint32 * 32)()
res = self._dll.JLINKARM_GetHWInfo(mask, ctypes.byref(buf))
if res != 0:
raise errors.JLinkException(res)
return list(buf) | Returns a list of 32 integer values corresponding to the bitfields
specifying the power consumption of the target.
The values returned by this function only have significance if the
J-Link is powering the target.
The words, indexed, have the following significance:
0. If ``1``, target is powered via J-Link.
1. Overcurrent bitfield:
0: No overcurrent.
1: Overcurrent happened. 2ms @ 3000mA
2: Overcurrent happened. 10ms @ 1000mA
3: Overcurrent happened. 40ms @ 400mA
2. Power consumption of target (mA).
3. Peak of target power consumption (mA).
4. Peak of target power consumption during J-Link operation (mA).
Args:
self (JLink): the ``JLink`` instance
mask (int): bit mask to decide which hardware information words are
returned (defaults to all the words).
Returns:
List of bitfields specifying different states based on their index
within the list and their value.
Raises:
JLinkException: on hardware error. |
375,008 | def check_initial_subdomain(cls, subdomain_rec):
if subdomain_rec.n != 0:
return False
if subdomain_rec.independent:
return False
return True | Verify that a first-ever subdomain record is well-formed.
* n must be 0
* the subdomain must not be independent of its domain |
375,009 | def post_grade(self, grade):
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False | Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed |
375,010 | def do_clearrep(self, line):
self._split_args(line, 0, 0)
self._command_processor.get_session().get_replication_policy().clear()
self._print_info_if_verbose("Cleared the replication policy") | clearrep Set the replication policy to default.
The default replication policy has no preferred or blocked member nodes, allows
replication and sets the preferred number of replicas to 3. |
375,011 | def notify_init(self):
_session_count = len(self._sessions)
self._update_session_count(1, _session_count)
if _session_count == 1:
self._run_queued_callbacks() | Run the queued callbacks for the first session only. |
375,012 | def stat(self, path):
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat(%r)' % path)
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg) | Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes |
375,013 | def find(wave, dep_var, der=None, inst=1, indep_min=None, indep_max=None):
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
close_min = np.isclose(min(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
close_max = np.isclose(max(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)
if ((np.amin(ret._dep_vector) > dep_var) and (not close_min)) or (
(np.amax(ret._dep_vector) < dep_var) and (not close_max)
):
return None
cross_wave = ret._dep_vector - dep_var
sign_wave = np.sign(cross_wave)
exact_idx = np.where(np.isclose(ret._dep_vector, dep_var, FP_RTOL, FP_ATOL))[0]
left_idx = np.where(np.diff(sign_wave))[0]
left_idx = np.setdiff1d(left_idx, exact_idx)
left_idx = np.setdiff1d(left_idx, exact_idx - 1)
right_idx = left_idx + 1 if left_idx.size else np.array([])
indep_var = ret._indep_vector[exact_idx] if exact_idx.size else np.array([])
dvector = np.zeros(exact_idx.size).astype(int) if exact_idx.size else np.array([])
if left_idx.size and (ret.interp == "STAIRCASE"):
idvector = (
2.0 * (ret._dep_vector[right_idx] > ret._dep_vector[left_idx]).astype(int)
- 1
)
if indep_var.size:
indep_var = np.concatenate((indep_var, ret._indep_vector[right_idx]))
dvector = np.concatenate((dvector, idvector))
sidx = np.argsort(indep_var)
indep_var = indep_var[sidx]
dvector = dvector[sidx]
else:
indep_var = ret._indep_vector[right_idx]
dvector = idvector
elif left_idx.size:
y_left = ret._dep_vector[left_idx]
y_right = ret._dep_vector[right_idx]
x_left = ret._indep_vector[left_idx]
x_right = ret._indep_vector[right_idx]
slope = ((y_left - y_right) / (x_left - x_right)).astype(float)
if indep_var.size:
indep_var = np.concatenate(
(indep_var, x_left + ((dep_var - y_left) / slope))
)
dvector = np.concatenate((dvector, np.where(slope > 0, 1, -1)))
sidx = np.argsort(indep_var)
indep_var = indep_var[sidx]
dvector = dvector[sidx]
else:
indep_var = x_left + ((dep_var - y_left) / slope)
dvector = np.where(slope > 0, +1, -1)
if der is not None:
indep_var = np.extract(dvector == der, indep_var)
return indep_var[inst - 1] if inst <= indep_var.size else None | r"""
Return the independent variable point associated with a dependent variable point.
If the dependent variable point is not in the dependent variable vector the
independent variable vector point is obtained by linear interpolation
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param dep_var: Dependent vector value to search for
:type dep_var: integer, float or complex
:param der: Dependent vector derivative filter. If +1 only independent
vector points that have positive derivatives when crossing
the requested dependent vector point are returned; if -1 only
independent vector points that have negative derivatives when
crossing the requested dependent vector point are returned;
if 0 only independent vector points that have null derivatives
when crossing the requested dependent vector point are
returned; otherwise if None all independent vector points are
returned regardless of the dependent vector derivative. The
derivative of the first and last point of the waveform is
assumed to be null
:type der: integer, float or complex
:param inst: Instance number filter. If, for example, **inst** equals 3,
then the independent variable vector point at which the
dependent variable vector equals the requested value for the
third time is returned
:type inst: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: integer, float or None if the dependent variable point is not found
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.find
:raises:
* RuntimeError (Argument \`dep_var\` is not valid)
* RuntimeError (Argument \`der\` is not valid)
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`inst\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]] |
375,014 | def index_relations(sender, pid_type, json=None,
record=None, index=None, **kwargs):
if not json:
json = {}
pid = PersistentIdentifier.query.filter(
PersistentIdentifier.object_uuid == record.id,
PersistentIdentifier.pid_type == pid_type,
).one_or_none()
relations = None
if pid:
relations = serialize_relations(pid)
if relations:
json['relations'] = relations
return json | Add relations to the indexed record. |
375,015 | def imap_unordered(requests, stream=True, pool=None, size=2, exception_handler=None):
def send(r):
return r.send(stream=stream)
own_pool = pool is None  # remember whether this function created the pool
pool = pool if pool else Pool(size)
for request in pool.imap_unordered(send, requests):
    if request.response is not None:
        yield request.response
    elif exception_handler:
        exception_handler(request, request.exception)
if own_pool:
    pool.close() | Concurrently converts a generator object of Requests to
a generator of Responses.
:param requests: a generator of Request objects.
:param stream: If False, the content will not be downloaded immediately.
:param pool: a gevent Pool to use; if None, a new pool of the given size is created.
:param size: Specifies the number of requests to make at a time. Default is 2.
:param exception_handler: Callback function, called when an exception occurs. Params: Request, Exception |
375,016 | def get_uids_from_record(self, record, key):
value = record.get(key, None)
if value is None:
return []
if isinstance(value, basestring):
value = value.split(",")
return filter(lambda uid: uid, value) | Returns a list of parsed UIDs from a single form field identified by
the given key.
A form field ending with `_uid` can contain an empty value, a
single UID or multiple UIDs separated by a comma.
This method parses the UID value and returns a list of non-empty UIDs. |
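A sketch of the parsing rule on assumed inputs; the helper re-implements the split-and-filter step for illustration:

    record = {"Analyses_uid": "uid-1,uid-2,", "Empty_uid": ""}

    def parse_uids(value):
        # same rule: split on commas, drop empty entries
        if not value:
            return []
        return [uid for uid in value.split(",") if uid]

    assert parse_uids(record["Analyses_uid"]) == ["uid-1", "uid-2"]
    assert parse_uids(record["Empty_uid"]) == []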
375,017 | def is_ready_update(self):
size_of_buffer = len(self.training_buffer.update_buffer['actions'])
return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1) | Returns whether or not the trainer has enough elements to run update model
:return: A boolean corresponding to whether or not update_model() can be run |
375,018 | def get_nodes(self, coord, coords):
def get_coord(coord):
return coords.get(coord, self.ds.coords.get(coord))
return list(map(get_coord,
coord.attrs.get('node_coordinates', '').split()[:2])) | Get the variables containing the definition of the nodes
Parameters
----------
coord: xarray.Coordinate
The mesh variable
coords: dict
The coordinates to use to get node coordinates |
375,019 | def self_consistent_update(u_kn, N_k, f_k):
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
states_with_samples = (N_k > 0)
log_denominator_n = logsumexp(f_k[states_with_samples] - u_kn[states_with_samples].T, b=N_k[states_with_samples], axis=1)
return -1. * logsumexp(-log_denominator_n - u_kn, axis=1) | Return an improved guess for the dimensionless free energies
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
f_k : np.ndarray, shape=(n_states), dtype='float'
Updated estimate of f_k
Notes
-----
Equation C3 in MBAR JCP paper. |
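In equation form, the update the code computes appears to be the standard MBAR self-consistent iteration (with the denominator restricted to states that have samples):

    f_k^{new} = -\ln \sum_{n=1}^{N} \frac{\exp(-u_{kn})}{\sum_{j} N_j \exp\left(f_j - u_{jn}\right)}

where the inner sum corresponds to log_denominator_n in the code above.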
375,020 | def upsert(self, doc, namespace, timestamp, update_spec=None):
index, doc_type = self._index_and_mapping(namespace)
doc_id = u(doc.pop("_id"))
metadata = {
    'ns': namespace,
    '_ts': timestamp
}
action = {
    '_op_type': 'index',
    '_index': index,
    '_type': doc_type,
    '_id': doc_id,
    '_source': self._formatter.format_document(doc)
}
meta_action = {
    '_op_type': 'index',
    '_index': self.meta_index_name,
    '_type': self.meta_type,
    '_id': doc_id,
    '_source': bson.json_util.dumps(metadata)
}
self.index(action, meta_action, doc, update_spec)
| Insert a document into Elasticsearch. |
375,021 | def main(args):
ui = getUI(args)
if ui.optionIsSet("test"):
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.\n")
sys.exit(1)
# we required two arguments, so we'll get exactly two here
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
out_fh = sys.stdout  # output handle; destination assumed, lost in extraction
for r in regionsIntersection(regions_1, regions_2):
    out_fh.write(str(r) + "\n") | main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list. |
375,022 | def record_magic(dct, magic_kind, magic_name, func):
if magic_kind == 'line_cell':
    dct['line'][magic_name] = dct['cell'][magic_name] = func
else:
dct[magic_kind][magic_name] = func | Utility function to store a function as a magic of a specific kind.
Parameters
----------
dct : dict
A dictionary with 'line' and 'cell' subdicts.
magic_kind : str
Kind of magic to be stored.
magic_name : str
Key to store the magic as.
func : function
Callable object to store. |
375,023 | def _to_dict(self):
_dict = {}
if hasattr(self, 'status') and self.status is not None:
    _dict['status'] = self.status
if hasattr(self, 'message') and self.message is not None:
    _dict['message'] = self.message
return _dict | Return a json dictionary representing this model. |
375,024 | def bonds(self):
seen = set()
for n, m_bond in self._adj.items():
seen.add(n)
for m, bond in m_bond.items():
if m not in seen:
yield n, m, bond | Iterate over all bonds. |
375,025 | def token_of_request(
self,
method,
host,
url,
qheaders,
content_type=None,
body=None):
parsed_url = urlparse(url)
netloc = parsed_url.netloc
path = parsed_url.path
query = parsed_url.query
if not host:
host = netloc
path_with_query = path
if query != '':
    path_with_query = ''.join([path_with_query, '?', query])
data = ''.join(["%s %s" %
                (method, path_with_query), "\n", "Host: %s" %
                host, "\n"])
if content_type:
data += "Content-Type: %s" % (content_type) + "\n"
data += qheaders
data += "\n"
if content_type and content_type != "application/octet-stream" and body:
data += body.decode(encoding='UTF-8')
return '{0}:{1}'.format(self.__access_key, self.__token(data)) | <Method> <PathWithRawQuery>
Host: <Host>
Content-Type: <ContentType>
[<X-Qiniu-*> Headers]
[<Body>] #这里的 <Body> 只有在 <ContentType> 存在且不为 application/octet-stream 时才签进去。 |
375,026 | def _do_multivalued_field_facets(self, results, field_facets):
facet_dict = {}
for field in field_facets:
facet_list = {}
if not self._multi_value_field(field):
continue
for result in results:
field_value = getattr(result, field)
for item in field_value:
facet_list[item] = facet_list.get(item, 0) + 1
facet_dict[field] = list(facet_list.items())
return facet_dict | Implements a multivalued field facet on the results.
This is implemented using brute force - O(N^2) -
because Xapian does not have it implemented yet
(see http://trac.xapian.org/ticket/199) |
375,027 | def maybe_replace_any_if_equal(name, expected, actual):
is_equal = expected == actual
if not is_equal and Config.replace_any:
actual_str = minimize_whitespace(str(actual))
if actual_str and actual_str[0] in {'"', "'"}:
    actual_str = actual_str[1:-1]
is_equal = actual_str in {'Any', 'typing.Any', 't.Any'}
if not is_equal:
expected_annotation = minimize_whitespace(str(expected))
actual_annotation = minimize_whitespace(str(actual))
raise ValueError(
f"incompatible existing {name}. " +
f"Expected: {expected_annotation!r}, actual: {actual_annotation!r}"
)
return expected or actual | Return the type given in `expected`.
Raise ValueError if `expected` isn't equal to `actual`. If --replace-any is
used, the Any type in `actual` is considered equal.
The implementation is naively checking if the string representation of
`actual` is one of "Any", "typing.Any", or "t.Any". This is done for two
reasons:
1. I'm lazy.
2. We want people to be able to explicitly state that they want Any without it
being replaced. This way they can use an alias. |
375,028 | def information_content(values):
"Number of bits to represent the probability distribution in values."
probabilities = normalize(removeall(0, values))
return sum(-p * log2(p) for p in probabilities) | Number of bits to represent the probability distribution in values. |
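A worked example of the entropy computation (assuming, as the names suggest, that removeall drops the zeros and normalize rescales to sum to 1):

    from math import log2

    values = [0.5, 0.25, 0.25, 0]      # the zero entry is dropped first
    probabilities = [0.5, 0.25, 0.25]  # already sums to 1 here
    bits = sum(-p * log2(p) for p in probabilities)
    print(bits)  # 1.5 bits: 0.5*1 + 0.25*2 + 0.25*2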
375,029 | def repr(self, changed_widgets=None):
if changed_widgets is None:
changed_widgets={}
local_changed_widgets = {}
self._set_updated()
return ''.join(('<', self.type, '>', self.innerHTML(local_changed_widgets), '</', self.type, '>')) | It is used to automatically represent the object to HTML format
packs all the attributes, children and so on.
Args:
changed_widgets (dict): A dictionary containing a collection of tags that have to be updated.
The tag that have to be updated is the key, and the value is its textual repr. |
375,030 | def _identify(self, dataframe):
idx = ~idx
return idx | Returns a list of indexes containing only the points that pass the filter.
Parameters
----------
dataframe : DataFrame |
375,031 | def _make_request(self, opener, request, timeout=None):
timeout = timeout or self.timeout
try:
return opener.open(request, timeout=timeout)
except HTTPError as err:
exc = handle_error(err)
exc.__cause__ = None
raise exc | Make the API call and return the response. This is separated into
its own function, so we can mock it easily for testing.
:param opener:
:type opener:
:param request: url payload to request
:type request: urllib.Request object
:param timeout: timeout value or None
:type timeout: float
:return: urllib response |
375,032 | def verify_checksum(*lines):
for line in lines:
checksum = line[68:69]
if not checksum.isdigit():
continue
checksum = int(checksum)
computed = compute_checksum(line)
if checksum != computed:
complaint = ('TLE line gives its checksum as {}'
             ' but in fact the checksum is {}:\n{}')
raise ValueError(complaint.format(checksum, computed, line)) | Verify the checksum of one or more TLE lines.
Raises `ValueError` if any of the lines fails its checksum, and
includes the failing line in the error message. |
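For context, a hedged sketch of the conventional TLE checksum rule that a compute_checksum helper typically implements; this is the standard convention, not necessarily the exact helper used above:

    def compute_checksum_sketch(line):
        # Over the first 68 characters: digits count as their value,
        # a minus sign counts as 1, everything else counts as 0.
        total = 0
        for ch in line[0:68]:
            if ch.isdigit():
                total += int(ch)
            elif ch == '-':
                total += 1
        return total % 10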
375,033 | def processRequest(self, request: Request, frm: str):
logger.debug("{} received client request: {} from {}".
format(self.name, request, frm))
self.nodeRequestSpikeMonitorData['accum'] += 1  # dict key lost in extraction; 'accum' assumed
| Handle a REQUEST from the client.
If the request has already been executed, the node re-sends the reply to
the client. Otherwise, the node acknowledges the client request, adds it
to its list of client requests, and sends a PROPAGATE to the
remaining nodes.
:param request: the REQUEST from the client
:param frm: the name of the client that sent this REQUEST |
375,034 | def bytes_to_ustr(self, b):
"convert bytes array to unicode string"
return b.decode(charset_map.get(self.charset, self.charset)) | convert bytes array to unicode string |
375,035 | def get_base_url(self, force_http=False):
base_url = SHConfig().aws_metadata_url.rstrip('/') if force_http else 's3:/'  # non-HTTP base assumed
aws_bucket = SHConfig().aws_s3_l1c_bucket if self.data_source is DataSource.SENTINEL2_L1C else \
SHConfig().aws_s3_l2a_bucket
return '{}/{}'.format(base_url, aws_bucket) | Creates base URL path
:param force_http: `True` if HTTP base URL should be used and `False` otherwise
:type force_http: bool
:return: base url string
:rtype: str |
375,036 | def search(self, start_ts, end_ts):
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(
{"_ts": {"$lte": end_ts, "$gte": start_ts}}
):
yield ts_ns_doc | Called to query Mongo for documents in a time range. |
375,037 | def postprocess_result(morphresult, trim_phonetic, trim_compound):
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
} | Postprocess vabamorf wrapper output. |
375,038 | def __create(self, account_id, name, short_description, amount, period,
**kwargs):
params = {
    'account_id': account_id,
    'name': name,
    'short_description': short_description,
    'amount': amount,
    'period': period
}
return self.make_call(self.__create, params, kwargs) | Call documentation: `/subscription_plan/create
<https://www.wepay.com/developer/reference/subscription_plan#create>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` |
375,039 | def _init_multi_count_metrics(self, pplan_helper):
to_in_init = [self.metrics[i] for i in self.inputs_init
if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)]
for in_stream in pplan_helper.get_my_bolt().inputs:
stream_id = in_stream.stream.id
global_stream_id = in_stream.stream.component_name + "/" + stream_id
for metric in to_in_init:
metric.add_key(stream_id)
metric.add_key(global_stream_id)
to_out_init = [self.metrics[i] for i in self.outputs_init
if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)]
for out_stream in pplan_helper.get_my_bolt().outputs:
stream_id = out_stream.stream.id
for metric in to_out_init:
metric.add_key(stream_id) | Initializes the default values for a necessary set of MultiCountMetrics |
375,040 | def to_java_doubles(m):
global _java
if _java is None: _init_registration()
m = np.asarray(m)
dims = len(m.shape)
if dims > 2: raise ValueError('matrix must be 1 or 2 dimensional')
bindat = serialize_numpy(m, 'd')
return (_java.jvm.nben.util.Numpy.double2FromBytes(bindat) if dims == 2
else _java.jvm.nben.util.Numpy.double1FromBytes(bindat)) | to_java_doubles(m) yields a java array object for the vector or matrix m. |
375,041 | def unset(entity, *types):
if not types:
types = (TypedField,)
fields = list(entity._fields.keys())
remove = (x for x in fields if isinstance(x, types))
for field in remove:
del entity._fields[field] | Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField. |
375,042 | def monitor_session_span_command_direction(self, **kwargs):
config = ET.Element("config")
monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span")
session = ET.SubElement(monitor, "session")
session_number_key = ET.SubElement(session, "session-number")
session_number_key.text = kwargs.pop('session_number')
span_command = ET.SubElement(session, "span-command")
direction = ET.SubElement(span_command, "direction")
direction.text = kwargs.pop('direction')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
375,043 | def cube(width, height, depth, center=(0.0, 0.0, 0.0), normals=True, uvs=True) -> VAO:
width, height, depth = width / 2.0, height / 2.0, depth / 2.0
pos = numpy.array([
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] + width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] + width, center[1] - height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] - height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] + width, center[1] + height, center[2] + depth,
center[0] - width, center[1] + height, center[2] - depth,
center[0] - width, center[1] + height, center[2] + depth,
center[0] + width, center[1] + height, center[2] + depth,
], dtype=numpy.float32)
if normals:
normal_data = numpy.array([
-0, 0, 1,
-0, 0, 1,
-0, 0, 1,
0, 0, 1,
0, 0, 1,
0, 0, 1,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
1, 0, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
0, -1, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
-1, -0, 0,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 0, -1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
], dtype=numpy.float32)
if uvs:
uvs_data = numpy.array([
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 0,
1, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 1,
0, 0,
1, 1,
0, 0,
1, 0,
0, 1,
0, 0,
1, 0,
0, 1,
1, 0,
1, 1,
1, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 1,
0, 1,
1, 0,
0, 1,
0, 0,
1, 0
], dtype=numpy.float32)
vao = VAO("geometry:cube")
vao.buffer(pos, '3f', ['in_position'])
if normals:
    vao.buffer(normal_data, '3f', ['in_normal'])
if uvs:
    vao.buffer(uvs_data, '2f', ['in_uv'])
return vao | Creates a cube VAO with normals and texture coordinates
Args:
width (float): Width of the cube
height (float): Height of the cube
depth (float): Depth of the cube
Keyword Args:
center: center of the cube as a 3-component tuple
normals: (bool) Include normals
uvs: (bool) include uv coordinates
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance |
375,044 | def nps_survey_response_show(self, survey_id, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/nps-api/responses
api_path = "/api/v2/nps/surveys/{survey_id}/responses/{id}.json"
api_path = api_path.format(survey_id=survey_id, id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/nps-api/responses#show-response |
375,045 | def _get_transitions(self, expression: Any, expected_type: PredicateType) -> Tuple[List[str], PredicateType]:
if isinstance(expression, (list, tuple)):
function_transitions, return_type, argument_types = self._get_function_transitions(expression[0],
expected_type)
if len(argument_types) != len(expression[1:]):
raise ParsingError(f'Wrong number of arguments for function in {expression}')
argument_transitions = []
for argument_type, subexpression in zip(argument_types, expression[1:]):
argument_transitions.extend(self._get_transitions(subexpression, argument_type)[0])
return function_transitions + argument_transitions, return_type
elif isinstance(expression, str):
if expression not in self._functions:
raise ParsingError(f"Unrecognized constant: {expression}")
constant_types = self._function_types[expression]
if len(constant_types) == 1:
constant_type = constant_types[0]
if expected_type not in constant_types:
raise ParsingError(f"{expression} did not have expected type {expected_type} "
                   f"(found {constant_type})")
return [f"{expected_type} -> {expression}"], expected_type
else:
    raise ParsingError(f"Cannot handle expression of type {type(expression)}") | This is used when converting a logical form into an action sequence. This piece
recursively translates a lisp expression into an action sequence, making sure we match the
expected type (or using the expected type to get the right type for constant expressions). |
375,046 | def draw_data_value_rect(cairo_context, color, value_size, name_size, pos, port_side):
c = cairo_context
rot_angle = .0
move_x = 0.
move_y = 0.
if port_side is SnappedSide.RIGHT:
move_x = pos[0] + name_size[0]
move_y = pos[1]
c.rectangle(move_x, move_y, value_size[0], value_size[1])
elif port_side is SnappedSide.BOTTOM:
move_x = pos[0] - value_size[1]
move_y = pos[1] + name_size[0]
rot_angle = pi / 2.
c.rectangle(move_x, move_y, value_size[1], value_size[0])
elif port_side is SnappedSide.LEFT:
move_x = pos[0] - value_size[0]
move_y = pos[1]
c.rectangle(move_x, move_y, value_size[0], value_size[1])
elif port_side is SnappedSide.TOP:
move_x = pos[0] - value_size[1]
move_y = pos[1] - value_size[0]
rot_angle = -pi / 2.
c.rectangle(move_x, move_y, value_size[1], value_size[0])
c.set_source_rgba(*color)
c.fill_preserve()
c.set_source_rgb(*gui_config.gtk_colors['BLACK'].to_floats())  # color key lost in extraction; 'BLACK' assumed
c.stroke()
return rot_angle, move_x, move_y | This method draws the containing rect for the data port value, depending on the side and size of the label.
:param cairo_context: Draw Context
:param color: Background color of value part
:param value_size: Size (width, height) of label holding the value
:param name_size: Size (width, height) of label holding the name
:param pos: Position of name label start point (upper left corner of label)
:param port_side: Side on which the value part should be drawn
:return: Rotation Angle (to rotate value accordingly), X-Position of value label start point, Y-Position
of value label start point |
375,047 | def from_pb(cls, app_profile_pb, instance):
match_app_profile_name = _APP_PROFILE_NAME_RE.match(app_profile_pb.name)
if match_app_profile_name is None:
raise ValueError(
"AppProfile protobuf name was not in the " "expected format.",
app_profile_pb.name,
)
if match_app_profile_name.group("instance") != instance.instance_id:
raise ValueError(
"Instance ID on app_profile does not match the "
"instance ID on the client"
)
if match_app_profile_name.group("project") != instance._client.project:
raise ValueError(
"Project ID on app_profile does not match the "
"project ID on the client"
)
app_profile_id = match_app_profile_name.group("app_profile_id")
result = cls(app_profile_id, instance)
result._update_from_pb(app_profile_pb)
return result | Creates an instance app_profile from a protobuf.
:type app_profile_pb: :class:`instance_pb2.app_profile_pb`
:param app_profile_pb: An instance protobuf object.
:type instance: :class:`google.cloud.bigtable.instance.Instance`
:param instance: The instance that owns the cluster.
:rtype: :class:`AppProfile`
:returns: The AppProfile parsed from the protobuf response.
:raises: :class:`ValueError <exceptions.ValueError>` if the AppProfile
name does not match
``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}``
or if the parsed instance ID does not match the instance ID
on the client.
or if the parsed project ID does not match the project ID
on the client. |
375,048 | def search(self, terms):
images = {}
response = self._request_builder('GET', 'search', params={'q': terms})  # verb, path, and param key assumed
if self._validate_response(response):
body = json.loads(response.content.decode())['results']  # result key assumed
for image in body:
images[image['name']] = image
return images | returns a dict {"name": "image_dict"} |
375,049 | def cash_table(self):
_cash = pd.DataFrame(
data=[self.cash[1::],
self.time_index_max],
index=['cash',
       'datetime']
).T
_cash = _cash.assign(
date=_cash.datetime.apply(lambda x: pd.to_datetime(str(x)[0:10]))
).assign(account_cookie=self.account_cookie)
return _cash.set_index(['datetime', 'account_cookie'], drop=False)
| The cash table. |
375,050 | def drop_curie(self, name):
curies = self.o[LINKS_KEY][self.draft.curies_rel]
if isinstance(curies, dict) and curies['name'] == name:
del self.o[LINKS_KEY][self.draft.curies_rel]
return
for i, curie in enumerate(curies):
if curie['name'] == name:
del curies[i]
break
continue | Removes a CURIE.
The CURIE link with the given name is removed from the document. |
375,051 | def reconstruct_emds(edm, Om, all_points, method=None, **kwargs):
from .point_set import dm_from_edm
N = all_points.shape[0]
d = all_points.shape[1]
dm = dm_from_edm(edm)
if method is None:
from .mds import superMDS
Xhat, __ = superMDS(all_points[0, :], N, d, Om=Om, dm=dm)
else:
C = kwargs.get('C', None)
b = kwargs.get('b', None)
if C is None or b is None:
    raise NameError(
        'Need constraints C and b for the chosen EMDS method.')
KE_noisy = np.multiply(np.outer(dm, dm), Om)
if method == 'iterative':
from .mds import iterativeEMDS
Xhat, __ = iterativeEMDS(
all_points[0, :], N, d, KE=KE_noisy, C=C, b=b)
elif method == 'relaxed':
from .mds import relaxedEMDS
Xhat, __ = relaxedEMDS(
all_points[0, :], N, d, KE=KE_noisy, C=C, b=b)
else:
raise NameError('Method name not recognized:', method)
Y, R, t, c = procrustes(all_points, Xhat, scale=False)
return Y | Reconstruct point set using E(dge)-MDS. |
375,052 | def get_asn_verbose_dns(self, asn=None):
if asn[0:2] != 'AS':
    asn = 'AS{0}'.format(asn)
zone = '{0}.asn.cymru.com'.format(asn)
try:
    log.debug('ASN verbose query for {0}'.format(zone))
    data = self.dns_resolver.query(zone, 'TXT')
return str(data[0])
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers,
dns.resolver.NoAnswer, dns.exception.Timeout) as e:
raise ASNLookupError(
'ASN lookup failed (DNS {0}) for {1}'.format(
    e.__class__.__name__, asn)
)
except:
raise ASNLookupError(
'ASN lookup failed for {0}'.format(asn)
) | The function for retrieving the information for an ASN from
Cymru via port 53 (DNS). This is needed since IP to ASN mapping via
Cymru DNS does not return the ASN Description like Cymru Whois does.
Args:
asn (:obj:`str`): The AS number (required).
Returns:
str: The raw ASN data.
Raises:
ASNLookupError: The ASN lookup failed. |
375,053 | def update_domain(self, domain, emailAddress=None, ttl=None, comment=None):
if not any((emailAddress, ttl, comment)):
raise exc.MissingDNSSettings(
"No settings provided to update_domain().")
uri = "/domains/%s" % utils.get_id(domain)
body = {"comment": comment,
"ttl": ttl,
"emailAddress": emailAddress,
}
none_keys = [key for key, val in body.items()
if val is None]
for none_key in none_keys:
body.pop(none_key)
resp, resp_body = self._async_call(uri, method="PUT", body=body,
error_class=exc.DomainUpdateFailed, has_response=False)
return resp_body | Provides a way to modify the following attributes of a domain
record:
- email address
- ttl setting
- comment |
375,054 | def to_triangulation(self):
from matplotlib.tri import Triangulation
conn = self.split("simplices").unstack()
coords = self.nodes.coords.copy()
node_map = pd.Series(data = np.arange(len(coords)), index = coords.index)
conn = node_map.loc[conn.values.flatten()].values.reshape(*conn.shape)
return Triangulation(coords.x.values, coords.y.values, conn) | Returns the mesh as a matplotlib.tri.Triangulation instance. (2D only) |
375,055 | def maybe_cythonize_extensions(top_path, config):
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install Cython with a version >= {0} in order '
           'to build a development version.').format(
    CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython is {0}.'.format(
    Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
directives = {'language_level': 3}  # directive key/value assumed
cy_cov = os.environ.get('CYTHON_COVERAGE', False)
if cy_cov:
    directives['linetrace'] = True
    macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
else:
macros = []
config.ext_modules = cythonize(
config.ext_modules,
compiler_directives=directives)
for e in config.ext_modules:
e.define_macros.extend(macros) | Tweaks for building extensions between release and development mode. |
375,056 | def get_target_extraction_context(self, build_file_path: str) -> dict:
extraction_context = {}
for name, builder in Plugin.builders.items():
extraction_context[name] = extractor(name, builder,
build_file_path, self)
return extraction_context | Return a build file parser target extraction context.
The target extraction context is a build-file-specific mapping from
builder-name to target extraction function,
for every registered builder. |
375,057 | def getAdaptedTraveltime(self, edgeID, time):
self._connection._beginMessage(tc.CMD_GET_EDGE_VARIABLE, tc.VAR_EDGE_TRAVELTIME,
edgeID, 1 + 4)
self._connection._string += struct.pack(
"!Bi", tc.TYPE_INTEGER, time)
return self._connection._checkResult(tc.CMD_GET_EDGE_VARIABLE,
tc.VAR_EDGE_TRAVELTIME, edgeID).readDouble() | getAdaptedTraveltime(string, double) -> double
Returns the travel time value (in s) used for (re-)routing
which is valid on the edge at the given time. |
375,058 | def setup_logging(verbosity, filename=None):
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
level = levels[min(verbosity, len(levels) - 1)]
logging.root.setLevel(level)
fmt = logging.Formatter(
    '%(asctime)s %(levelname)s %(message)s')  # format string lost in extraction; a common pattern assumed
hdlr = logging.StreamHandler()
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr)
if filename:
hdlr = logging.FileHandler(filename, 'w')  # file mode lost in extraction; 'w' assumed
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr) | Configure logging for this tool. |
375,059 | def register_intent_parser(self, intent_parser, domain=0):
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_intent_parser(
intent_parser=intent_parser) | Register a intent parser with a domain.
Args:
intent_parser(intent): The intent parser you wish to register.
domain(str): a string representing the domain you wish register the intent
parser to. |
375,060 | def device_destroy(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.device_destroy_with_http_info(id, **kwargs)
else:
(data) = self.device_destroy_with_http_info(id, **kwargs)
return data | Delete a device. # noqa: E501
Delete device. Only available for devices with a developer certificate. Attempts to delete a device with a production certicate will return a 400 response. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.device_destroy(id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str id: (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
375,061 | def get(self, wheel=False):
try:
url = get_url(self.client, self.name, self.version,
wheel, hashed_format=True)[0]
except exceptions.MissingUrlException as e:
raise SystemExit(e)
if wheel:
self.temp_dir = tempfile.mkdtemp()
save_dir = self.temp_dir
else:
save_dir = self.save_dir
save_file = '{0}/{1}'.format(save_dir, url.split('/')[-1])
request.urlretrieve(url, save_file)
logger.info('Downloaded package from PyPI: {0}.'.format(save_file))
return save_file | Downloads the package from PyPI.
Returns:
Full path of the downloaded file.
Raises:
PermissionError if the save_dir is not writable. |
375,062 | def precompute_begin_state(self):
begin_state = tuple([ BEGIN ] * self.state_size)
choices, weights = zip(*self.model[begin_state].items())
cumdist = list(accumulate(weights))
self.begin_cumdist = cumdist
self.begin_choices = choices | Caches the summation calculation and available choices for BEGIN * state_size.
Significantly speeds up chain generation on large corpuses. Thanks, @schollz! |
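The cached cumulative distribution turns weighted sampling into a binary search; a sketch of how such a cache is typically consumed (names and weights assumed):

    import bisect
    import random

    choices = ("a", "b", "c")
    cumdist = [5, 8, 10]  # cumulative sums of weights (5, 3, 2)

    def weighted_choice():
        # draw uniformly in [0, total_weight) and locate the bucket in O(log n)
        r = random.random() * cumdist[-1]
        return choices[bisect.bisect(cumdist, r)]

    sample = weighted_choice()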
375,063 | def ndim(self):
if self.is_raster():
return {
FeatureType.DATA: 4,
FeatureType.MASK: 4,
FeatureType.SCALAR: 2,
FeatureType.LABEL: 2,
FeatureType.DATA_TIMELESS: 3,
FeatureType.MASK_TIMELESS: 3,
FeatureType.SCALAR_TIMELESS: 1,
FeatureType.LABEL_TIMELESS: 1
}[self]
return None | If given FeatureType stores a dictionary of numpy.ndarrays it returns dimensions of such arrays. |
375,064 | def get_victoria_day(self, year):
may_24th = date(year, 5, 24)
shift = may_24th.weekday() or 7
victoria_day = may_24th - timedelta(days=shift)
return (victoria_day, "Victoria Day") | Return Victoria Day for Edinburgh.
Set to the Monday strictly before May 24th. It means that if May 24th
is a Monday, it's shifted to the week before. |
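A quick check of the shift arithmetic on concrete years, using the same stdlib calls (dates worked out from the rule, not taken from the library's tests):

    from datetime import date, timedelta

    def victoria_day(year):
        may_24th = date(year, 5, 24)
        shift = may_24th.weekday() or 7  # Monday (weekday 0) becomes a full-week shift
        return may_24th - timedelta(days=shift)

    # 2021-05-24 is a Monday, so the holiday shifts a full week back:
    assert victoria_day(2021) == date(2021, 5, 17)
    # 2019-05-24 is a Friday (weekday 4), so it shifts to the Monday before:
    assert victoria_day(2019) == date(2019, 5, 20)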
375,065 | def _event_duration(vevent):
if hasattr(vevent, 'dtend'):
return vevent.dtend.value - vevent.dtstart.value
elif hasattr(vevent, 'duration') and vevent.duration.value:
return vevent.duration.value
return timedelta(0) | unify dtend and duration to the duration of the given vevent |
375,066 | def initialize(self, store):
assert isinstance(store, stores.BaseStore)
self.messages = Queue()
self.store = store
self.store.register(self) | Common initialization of handlers happens here. If additional
initialization is required, this method must either be called with
``super`` or the child class must assign the ``store`` attribute and
register itself with the store. |
375,067 | def flatten(suitable_for_isinstance):
types = set()
if not isinstance(suitable_for_isinstance, tuple):
suitable_for_isinstance = (suitable_for_isinstance,)
for thing in suitable_for_isinstance:
if isinstance(thing, tuple):
types.update(flatten(thing))
else:
types.add(thing)
return tuple(types) | isinstance() can accept a bunch of really annoying different types:
* a single type
* a tuple of types
* an arbitrary nested tree of tuples
Return a flattened tuple of the given argument. |
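A usage sketch showing the nested-tuple flattening (placeholder classes assumed):

    class A: pass
    class B: pass
    class C: pass

    nested = (A, (B, (C,)))
    flat = flatten(nested)
    assert set(flat) == {A, B, C}

    # the result is again a valid second argument for isinstance()
    assert isinstance(A(), flat)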
375,068 | def DbPutClassAttributeProperty2(self, argin):
self._log.debug("In DbPutClassAttributeProperty2()")
class_name = argin[0]
nb_attributes = int(argin[1])
self.db.put_class_attribute_property2(class_name, nb_attributes, argin[2:]) | This command adds support for array properties compared to the previous one
called DbPutClassAttributeProperty. The old command is still there for compatibility reasons.
:param argin: Str[0] = Tango class name
Str[1] = Attribute number
Str[2] = Attribute name
Str[3] = Property number
Str[4] = Property name
Str[5] = Property value number (array case)
Str[5] = Property value 1
Str[n] = Property value n (array case)
.....
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid |
375,069 | def creds_display(creds: dict, filt: dict = None, filt_dflt_incl: bool = False) -> dict:
rv = {}
if filt is None:
filt = {}
for cred_uuid in creds.get('attrs', {}):
    for cred in creds['attrs'][cred_uuid]:
        cred_info = cred['cred_info']
        if cred_info['referent'] in rv:
            continue
        cred_cd_id = cred_info['cred_def_id']
        if (not filt) or (filt_dflt_incl and cred_cd_id not in filt):
            rv[cred_info['referent']] = cred_info
            continue
        if filt and cred_cd_id in filt:
            if ({k: str(filt[cred_cd_id][k]) for k in filt[cred_cd_id]}.items() <= cred_info['attrs'].items()):
                rv[cred_info['referent']] = cred_info
return rv | Find indy-sdk creds matching input filter from within input creds structure,
json-loaded as returned via HolderProver.get_creds(), and return human-legible summary.
:param creds: creds structure returned by HolderProver.get_creds(); e.g.,
::
{
"attrs": {
"attr0_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr1_uuid": [
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "2",
"attr1": "Hello",
"attr2": "World"
},
"referent": "00000000-0000-0000-0000-000000000000",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0",
"cred_rev_id": null,
"rev_reg_id": null
}
},
{
"interval": null,
"cred_info": {
"attrs": {
"attr0": "1",
"attr1": "Nice",
"attr2": "Tractor"
},
"referent": "00000000-0000-0000-0000-111111111111",
"schema_id": "Q4zqM7aXqm7gDQkUVLng9h:2:bc-reg:1.0",
"cred_def_id": "Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0",
"cred_rev_id": null,
"rev_reg_id": null
}
}
],
"attr2_uuid": [
...
]
}
}
:param filt: filter for matching attributes and values; dict (None or empty for no filter, matching all)
mapping each cred def identifier to dict mapping attributes to values to match; e.g.,
::
{
'Q4zqM7aXqm7gDQkUVLng9h:3:CL:18:0': {
'attr0': 1, # operation stringifies en passant
'attr1': 'Nice'
},
...
}
:param: filt_dflt_incl: whether to include (True) all attributes for schema that filter does not identify
or to exclude (False) all such attributes
:return: human-legible dict mapping credential identifiers to human-readable creds synopses -- not proper
indy-sdk creds structures (each as per HolderProver.get_creds_display_coarse()) -- for creds matching
input filter |
375,070 | def get_averaged_bias_matrix(bias_sequences, dtrajs, nstates=None):
from pyemma.thermo.extensions.util import (logsumexp as _logsumexp, logsumexp_pair as _logsumexp_pair)
nmax = int(_np.max([dtraj.max() for dtraj in dtrajs]))
if nstates is None:
nstates = nmax + 1
elif nstates < nmax + 1:
raise ValueError("nstates is smaller than the number of observed microstates")
nthermo = bias_sequences[0].shape[1]
bias_matrix = -_np.ones(shape=(nthermo, nstates), dtype=_np.float64) * _np.inf
counts = _np.zeros(shape=(nstates,), dtype=_np.intc)
for s in range(len(bias_sequences)):
for i in range(nstates):
idx = (dtrajs[s] == i)
nidx = idx.sum()
if nidx == 0:
continue
counts[i] += nidx
selected_bias_sequence = bias_sequences[s][idx, :]
for k in range(nthermo):
bias_matrix[k, i] = _logsumexp_pair(
bias_matrix[k, i],
_logsumexp(
_np.ascontiguousarray(-selected_bias_sequence[:, k]),
inplace=False))
idx = counts.nonzero()
log_counts = _np.log(counts[idx])
bias_matrix *= -1.0
bias_matrix[:, idx] += log_counts[_np.newaxis, :]
return bias_matrix | r"""
Computes a bias matrix via an exponential average of the observed frame wise bias energies.
Parameters
----------
bias_sequences : list of numpy.ndarray(T_i, num_therm_states)
A single reduced bias energy trajectory or a list of reduced bias energy trajectories.
For every simulation frame seen in trajectory i and time step t, btrajs[i][t, k] is the
reduced bias energy of that frame evaluated in the k'th thermodynamic state (i.e. at
the k'th Umbrella/Hamiltonian/temperature)
dtrajs : list of numpy.ndarray(T_i) of int
A single discrete trajectory or a list of discrete trajectories. The integers are indexes
in 0,...,num_conf_states-1 enumerating the num_conf_states Markov states or the bins the
trajectory is in at any time.
nstates : int, optional, default=None
Number of configuration states.
Returns
-------
bias_matrix : numpy.ndarray(shape=(num_therm_states, num_conf_states)) object
bias_energies_full[j, i] is the bias energy in units of kT for each discrete state i
at thermodynamic state j. |
375,071 | def download_apcor(self, uri):
local_file = os.path.basename(uri)
if os.access(local_file, os.F_OK):
fobj = open(local_file)
else:
fobj = storage.vofile(uri, view='data')
fobj.seek(0)
str = fobj.read()
fobj.close()
apcor_str = str
return ApcorData.from_string(apcor_str) | Downloads apcor data.
Args:
uri: The URI of the apcor data file.
Returns:
apcor: ossos.downloads.core.ApcorData |
375,072 | def iterate_analogy_datasets(args):
for dataset_name in args.analogy_datasets:
parameters = nlp.data.list_datasets(dataset_name)
for key_values in itertools.product(*parameters.values()):
kwargs = dict(zip(parameters.keys(), key_values))
yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs) | Generator over all analogy evaluation datasets.
Iterates over dataset names, keyword arguments for their creation and the
created dataset. |
375,073 | def _get_role(rolename):
path = os.path.join('roles', rolename + '.json')
if not os.path.exists(path):
    abort("Couldn't read role file {0}".format(path))
with open(path, 'r') as f:
    role = json.loads(f.read())  # parsing step reconstructed; original error handling may differ
role['fullname'] = rolename
return role | Reads and parses a file containing a role |
375,074 | def _symmetrize_correlograms(correlograms):
n_clusters, _, n_bins = correlograms.shape
assert n_clusters == _
correlograms[..., 0] = np.maximum(correlograms[..., 0],
correlograms[..., 0].T)
sym = correlograms[..., 1:][..., ::-1]
sym = np.transpose(sym, (1, 0, 2))
return np.dstack((sym, correlograms)) | Return the symmetrized version of the CCG arrays. |
375,075 | def get_status_badge(self, project, definition, branch_name=None, stage_name=None, job_name=None, configuration=None, label=None):
route_values = {}
if project is not None:
    route_values['project'] = self._serialize.url('project', project, 'str')
if definition is not None:
    route_values['definition'] = self._serialize.url('definition', definition, 'str')
query_parameters = {}
if branch_name is not None:
    query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str')
if stage_name is not None:
    query_parameters['stageName'] = self._serialize.query('stage_name', stage_name, 'str')
if job_name is not None:
    query_parameters['jobName'] = self._serialize.query('job_name', job_name, 'str')
if configuration is not None:
    query_parameters['configuration'] = self._serialize.query('configuration', configuration, 'str')
if label is not None:
    query_parameters['label'] = self._serialize.query('label', label, 'str')
response = self._send(http_method='GET',
                      location_id='...',  # REST location GUID elided in the source
                      version='...',      # API version string elided in the source
                      route_values=route_values,
                      query_parameters=query_parameters)
return self._deserialize('str', response) | GetStatusBadge.
[Preview API] <p>Gets the build status for a definition, optionally scoped to a specific branch, stage, job, and configuration.</p> <p>If there are more than one, then it is required to pass in a stageName value when specifying a jobName, and the same rule then applies for both if passing a configuration parameter.</p>
:param str project: Project ID or project name
:param str definition: Either the definition name with optional leading folder path, or the definition id.
:param str branch_name: Only consider the most recent build for this branch.
:param str stage_name: Use this stage within the pipeline to render the status.
:param str job_name: Use this job within a stage of the pipeline to render the status.
:param str configuration: Use this job configuration to render the status
:param str label: Replaces the default text on the left side of the badge.
:rtype: str |
375,076 | def enable_service_freshness_checks(self):
if not self.my_conf.check_service_freshness:
self.my_conf.modified_attributes |= \
DICT_MODATTR["MODATTR_FRESHNESS_CHECKS_ENABLED"].value
self.my_conf.check_service_freshness = True
self.my_conf.explode_global_conf()
self.daemon.update_program_status() | Enable service freshness checks (globally)
Format of the line that triggers function call::
ENABLE_SERVICE_FRESHNESS_CHECKS
:return: None |
375,077 | def detect_with_url(
self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, custom_headers=None, raw=False, **operation_config):
image_url = models.ImageUrl(url=url)
url = self.detect_with_url.metadata['url']
path_format_arguments = {
    'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
if return_face_id is not None:
    query_parameters['returnFaceId'] = self._serialize.query("return_face_id", return_face_id, 'bool')
if return_face_landmarks is not None:
    query_parameters['returnFaceLandmarks'] = self._serialize.query("return_face_landmarks", return_face_landmarks, 'bool')
if return_face_attributes is not None:
    query_parameters['returnFaceAttributes'] = self._serialize.query("return_face_attributes", return_face_attributes, '[FaceAttributeType]', div=',')
if recognition_model is not None:
    query_parameters['recognitionModel'] = self._serialize.query("recognition_model", recognition_model, 'str')
if return_recognition_model is not None:
    query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool')
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
body_content = self._serialize.body(image_url, 'ImageUrl')
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DetectedFace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | Detect human faces in an image, return face rectangles, and optionally
with faceIds, landmarks, and attributes.<br />
* Optional parameters including faceId, landmarks, and attributes.
Attributes include age, gender, headPose, smile, facialHair, glasses,
emotion, hair, makeup, occlusion, accessories, blur, exposure and
noise.
* The extracted face feature, instead of the actual image, will be
stored on server. The faceId is an identifier of the face feature and
will be used in [Face -
Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239),
[Face -
Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a),
and [Face - Find
Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237).
It will expire 24 hours after the detection call.
* Higher face image quality means better detection and recognition
precision. Please consider high-quality faces: frontal, clear, and face
size is 200x200 pixels (100 pixels between eyes) or bigger.
* JPEG, PNG, GIF (the first frame), and BMP format are supported. The
allowed image file size is from 1KB to 6MB.
* Faces are detectable when its size is 36x36 to 4096x4096 pixels. If
need to detect very small but clear faces, please try to enlarge the
input image.
* Up to 64 faces can be returned for an image. Faces are ranked by face
rectangle size from large to small.
* Face detector prefer frontal and near-frontal faces. There are cases
that faces may not be detected, e.g. exceptionally large face angles
(head-pose) or being occluded, or wrong image orientation.
* Attributes (age, gender, headPose, smile, facialHair, glasses,
emotion, hair, makeup, occlusion, accessories, blur, exposure and
noise) may not be perfectly accurate. HeadPose's pitch value is a
reserved field and will always return 0.
* Different 'recognitionModel' values are provided. If follow-up
operations like Verify, Identify, Find Similar are needed, please
specify the recognition model with 'recognitionModel' parameter. The
default value for 'recognitionModel' is 'recognition_01', if latest
model needed, please explicitly specify the model you need in this
parameter. Once specified, the detected faceIds will be associated with
the specified recognition model. More details, please refer to [How to
specify a recognition
model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)
.
:param url: Publicly reachable URL of an image
:type url: str
:param return_face_id: A value indicating whether the operation should
return faceIds of detected faces.
:type return_face_id: bool
:param return_face_landmarks: A value indicating whether the operation
should return landmarks of the detected faces.
:type return_face_landmarks: bool
:param return_face_attributes: Analyze and return the one or more
specified face attributes in the comma-separated string like
"returnFaceAttributes=age,gender". Supported face attributes include
age, gender, headPose, smile, facialHair, glasses and emotion. Note
that each face attribute analysis has additional computational and
time cost.
:type return_face_attributes: list[str or
~azure.cognitiveservices.vision.face.models.FaceAttributeType]
:param recognition_model: Name of recognition model. Recognition model
is used when the face features are extracted and associated with
detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition
model name can be provided when performing Face - Detect or
(Large)FaceList - Create or (Large)PersonGroup - Create. The default
value is 'recognition_01'; if the latest model is needed, please
explicitly specify the model you need. Possible values include: 'recognition_01',
'recognition_02'
:type recognition_model: str or
~azure.cognitiveservices.vision.face.models.RecognitionModel
:param return_recognition_model: A value indicating whether the
operation should return 'recognitionModel' in response.
:type return_recognition_model: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype: list[~azure.cognitiveservices.vision.face.models.DetectedFace]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.face.models.APIErrorException>` |
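A minimal usage sketch for this detection call, assuming a FaceClient from azure-cognitiveservices-vision-face; the endpoint, key, and image URL below are placeholders:

# Hedged sketch: endpoint, subscription key, and image URL are placeholders.
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

client = FaceClient("https://<region>.api.cognitive.microsoft.com",
                    CognitiveServicesCredentials("<subscription-key>"))
faces = client.face.detect_with_url(
    url="https://example.com/photo.jpg",
    return_face_id=True,
    return_face_attributes=["age", "emotion"],
    recognition_model="recognition_02")
for face in faces:
    rect = face.face_rectangle
    print(face.face_id, rect.left, rect.top, rect.width, rect.height)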
375,078 | def fnmatch_multiple(candidates, pattern):
try:
candidates_iter = iter(candidates)
except TypeError:
return None
for candidate in candidates_iter:
try:
if fnmatch.fnmatch(candidate, pattern):
return candidate
except TypeError:
pass
return None | Convenience function which runs fnmatch.fnmatch() on each element of passed
iterable. The first matching candidate is returned, or None if there is no
matching candidate. |
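A quick usage sketch of this helper:

matched = fnmatch_multiple(["eth0", "wlan0", "lo"], "wl*")
print(matched)                       # -> "wlan0"
print(fnmatch_multiple(None, "*"))   # non-iterable input -> None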
375,079 | def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Body truncated in this dump; upstream cpplint continues by checking for a
# missing blank line before the protected/private section and reporting it.
pass
| Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found. |
375,080 | def visit_and_update_expressions(self, visitor_fn):
new_fields = {}
for key, value in six.iteritems(self.fields):
new_value = value.visit_and_update(visitor_fn)
if new_value is not value:
new_fields[key] = new_value
if new_fields:
return ConstructResult(dict(self.fields, **new_fields))
else:
return self | Create an updated version (if needed) of the ConstructResult via the visitor pattern. |
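A hedged sketch of a visitor function for this pattern; the Variable expression type and the construct_result instance are illustrative:

def rename_variable(expression):
    # Return a replacement for matching expressions; leave everything else as-is.
    if isinstance(expression, Variable) and expression.name == "$old":
        return Variable("$new")
    return expression

updated_block = construct_result.visit_and_update_expressions(rename_variable)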
375,081 | def count_emails(self, conditions={}):
url = self.EMAILS_COUNT_URL + "?"
for key, value in conditions.items():
if key == 'ids':  # stripped literal; 'ids' is the assumed list-valued key
value = ",".join(value)
url += '%s=%s&' % (key, value)  # query format string reconstructed
connection = Connection(self.token)
connection.set_url(self.production, url)
return connection.get_request() | Count all certified emails |
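A hedged usage sketch; the condition keys supported by the service are assumptions:

# Hypothetical conditions; 'ids' is the assumed list-valued key.
total = api.count_emails({"status": "delivered", "ids": ["id1", "id2"]})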
375,082 | def autofix_codeblock(codeblock, max_line_len=80,
aggressive=False,
very_aggressive=False,
experimental=False):
import autopep8
# Stripped flag literals below are reconstructed and may differ from the source.
arglist = ['--max-line-length', str(max_line_len)]
if aggressive:
arglist.extend(['-a'])
if very_aggressive:
arglist.extend(['-a', '-a'])
if experimental:
arglist.extend(['--experimental'])
arglist.extend([''])  # empty positional filename expected by autopep8.parse_args
autopep8_options = autopep8.parse_args(arglist)
fixed_codeblock = autopep8.fix_code(codeblock, options=autopep8_options)
return fixed_codeblock | r"""
Uses autopep8 to format a block of code
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> codeblock = ut.codeblock(
'''
def func( with , some = 'Problems' ):
syntax ='Ok'
but = 'Its very messy'
if None:
# syntax might not be perfect due to being cut off
ommiting_this_line_still_works= True
''')
>>> fixed_codeblock = ut.autofix_codeblock(codeblock)
>>> print(fixed_codeblock) |
375,083 | def samples(dataset='imagenet', index=0, batchsize=1, shape=(224, 224),
data_format='channels_last'):
# Stripped string literals reconstructed (foolbox-style defaults); may differ.
from PIL import Image
images, labels = [], []
basepath = os.path.dirname(__file__)
samplepath = os.path.join(basepath, 'data')
files = os.listdir(samplepath)
for idx in range(index, index + batchsize):
i = idx % 20
file = [n for n in files if '{}_{:02d}_'.format(dataset, i) in n][0]
label = int(file.split('.')[0].split('_')[-1])
path = os.path.join(samplepath, file)
image = Image.open(path)
if dataset == 'imagenet':
image = image.resize(shape)
image = np.asarray(image, dtype=np.float32)
if dataset != 'mnist' and data_format == 'channels_first':
image = np.transpose(image, (2, 0, 1))
image = np.transpose(image, (2, 0, 1))
images.append(image)
labels.append(label)
labels = np.array(labels)
images = np.stack(images)
return images, labels | Returns a batch of example images and the corresponding labels
Parameters
----------
dataset : string
The data set to load (options: imagenet, mnist, cifar10,
cifar100, fashionMNIST)
index : int
For each data set 20 example images exist. The returned batch
contains the images with index [index, index + 1, index + 2, ...]
batchsize : int
Size of batch.
shape : list of integers
The shape of the returned image (only relevant for Imagenet).
data_format : str
"channels_first" or "channels_last"
Returns
-------
images : array_like
The batch of example images
labels : array of int
The labels associated with the images. |
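A usage sketch of this loader:

images, labels = samples(dataset="cifar10", index=0, batchsize=4)
print(images.shape)   # e.g. (4, 32, 32, 3) with channels_last
print(labels)         # 4 integer class labels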
375,084 | def _routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is ", route)
for del_route in removes:
if route[] == del_route[]:
removes.remove(del_route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, , route)
for route in removes:
LOG.debug("Removed route entry is ", route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, , route)
ri.routes = new_routes | Update the state of routes in the router.
Compares the current routes with the (configured) existing routes
and detect what was removed or added. Then configure the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails. |
375,085 | def check_len_in(self, min_len, max_len, item):
if max_len is None:
if min_len:
self.add_check("_coconut.len(" + item + ") >= " + str(min_len))
elif min_len == max_len:
self.add_check("_coconut.len(" + item + ") == " + str(min_len))
elif not min_len:
self.add_check("_coconut.len(" + item + ") <= " + str(max_len))
else:
self.add_check(str(min_len) + " <= _coconut.len(" + item + ") <= " + str(max_len)) | Checks that the length of item is in range(min_len, max_len+1). |
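A sketch of the length checks this method emits for each branch (the item name "args" is illustrative):

# check_len_in(2, None, "args")  adds  "_coconut.len(args) >= 2"
# check_len_in(3, 3, "args")     adds  "_coconut.len(args) == 3"
# check_len_in(0, 5, "args")     adds  "_coconut.len(args) <= 5"
# check_len_in(1, 4, "args")     adds  "1 <= _coconut.len(args) <= 4"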
375,086 | def __densify_border(self):
if isinstance(self._input_geom, MultiPolygon):
polygons = [polygon for polygon in self._input_geom]
else:
polygons = [self._input_geom]
points = []
for polygon in polygons:
if len(polygon.interiors) == 0:
exterior = LineString(polygon.exterior)
points += self.__fixed_interpolation(exterior)
else:
exterior = LineString(polygon.exterior)
points += self.__fixed_interpolation(exterior)
for j in range(len(polygon.interiors)):
interior = LineString(polygon.interiors[j])
points += self.__fixed_interpolation(interior)
return points | Densify the border of a polygon.
The border is densified by a given factor (by default: 0.5).
The complexity of the polygon's geometry is evaluated in order
to densify the borders of its interior rings as well.
Returns:
list: a list of points where each point is represented by
a list of its reduced coordinates
Example:
[[X1, Y1], [X2, Y2], ..., [Xn, Yn] |
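A minimal sketch of fixed-step interpolation along a ring with shapely, the idea behind the __fixed_interpolation helper used above (the step size is illustrative):

from shapely.geometry import LineString

ring = LineString([(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)])
step = 0.5  # densification step; illustrative
distances = [i * step for i in range(int(ring.length / step) + 1)]
points = [list(ring.interpolate(d).coords[0]) for d in distances]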
375,087 | def as_float_array(a):
return np.asarray(a, dtype=np.quaternion).view((np.double, 4)) | View the quaternion array as an array of floats
This function is fast (of order 1 microsecond) because no data is
copied; the returned quantity is just a "view" of the original.
The output view has one more dimension (of size 4) than the input
array, but is otherwise the same shape. |
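A usage sketch, assuming the numpy-quaternion package:

import numpy as np
import quaternion  # registers the np.quaternion dtype

q = np.array([np.quaternion(1, 0, 0, 0), np.quaternion(0, 1, 0, 0)])
f = as_float_array(q)
print(f.shape)  # (2, 4): same shape plus a trailing axis of size 4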
375,088 | def update(self, name=None, description=None, privacy_policy=None,
subscription_policy=None, is_managed=None):
with db.session.begin_nested():
if name is not None:
self.name = name
if description is not None:
self.description = description
if (
privacy_policy is not None and
PrivacyPolicy.validate(privacy_policy)
):
self.privacy_policy = privacy_policy
if (
subscription_policy is not None and
SubscriptionPolicy.validate(subscription_policy)
):
self.subscription_policy = subscription_policy
if is_managed is not None:
self.is_managed = is_managed
db.session.merge(self)
return self | Update group.
:param name: Name of group.
:param description: Description of group.
:param privacy_policy: PrivacyPolicy
:param subscription_policy: SubscriptionPolicy
:returns: Updated group |
375,089 | def record_to_fs(self):
fr = self.record
fn_path = self.file_name
if fr.contents:
if six.PY2:
with self._fs.open(fn_path, 'wb') as f:  # binary mode reconstructed from stripped literal
self.record_to_fh(f)
else:
with self._fs.open(fn_path, 'w', newline='') as f:  # text mode reconstructed from stripped literals
self.record_to_fh(f) | Create a filesystem file from a File |
375,090 | def get_alpha_value(self):
if isinstance(self.__alpha_value, float) is False:
raise TypeError("The type of __alpha_value must be float.")
return self.__alpha_value | getter
Learning rate. |
375,091 | def IsDesktopLocked() -> bool:
isLocked = False
desk = ctypes.windll.user32.OpenDesktopW(ctypes.c_wchar_p('Default'), 0, 0, 0x0100)  # 0x0100 = DESKTOP_SWITCHDESKTOP; desktop name reconstructed
if desk:
isLocked = not ctypes.windll.user32.SwitchDesktop(desk)
ctypes.windll.user32.CloseDesktop(desk)
return isLocked | Check if desktop is locked.
Return bool.
Desktop is locked if press Win+L, Ctrl+Alt+Del or in remote desktop mode. |
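A small Windows-only usage sketch:

import time

while not IsDesktopLocked():
    time.sleep(1.0)  # poll once per second until the workstation locks
print("Desktop locked")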
375,092 | def post(self):
self._construct_post_data()
post_args = {"json": self.post_data}
self.http_method_args.update(post_args)
return self.http_method("POST") | Makes the HTTP POST to the url sending post_data. |
375,093 | def zone_compare(timezone):
# Grain keys, messages, and the elif branch reconstruct stripped/garbled literals.
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_localtime_path()):
return timezone == get_zone()
tzfile = _get_localtime_path()
zonepath = _get_zone_file(timezone)
try:
return filecmp.cmp(tzfile, zonepath, shallow=False)
except OSError as exc:
problematic_file = exc.filename
if problematic_file == zonepath:
raise SaltInvocationError(
'Can\'t find a local timezone "{0}"'.format(timezone))
elif problematic_file == tzfile:
raise SaltInvocationError(
'Failed to read {0} to determine current timezone: {1}'
.format(tzfile, exc.strerror))
raise | Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-like operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver' |
375,094 | def job_exists(name=None):
if not name:
raise SaltInvocationError('Required parameter \'name\' is missing')
server = _connect()
if server.job_exists(name):
return True
else:
return False | Check whether the job exists in configured Jenkins jobs.
:param name: The name of the job to check for existence.
:return: True if job exists, False if job does not exist.
CLI Example:
.. code-block:: bash
salt '*' jenkins.job_exists jobname |
375,095 | def record_schemas(
fn, wrapper, location, request_schema=None, response_schema=None):
has_acceptable = hasattr(fn, '_acceptable_metadata')  # attribute name reconstructed
if request_schema is not None:
wrapper._request_schema = request_schema
wrapper._request_schema_location = location
if has_acceptable:
fn._acceptable_metadata._request_schema = request_schema
fn._acceptable_metadata._request_schema_location = location
if response_schema is not None:
wrapper._response_schema = response_schema
wrapper._response_schema_location = location
if has_acceptable:
fn._acceptable_metadata._response_schema = response_schema
fn._acceptable_metadata._response_schema_location = location | Support extracting the schema from the decorated function. |
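A hedged sketch of how this helper is called; the schema, view names, and location string are illustrative:

request_schema = {"type": "object",
                  "properties": {"name": {"type": "string"}}}

def view(payload):
    return {"ok": True}

def wrapper(payload):
    return view(payload)

record_schemas(view, wrapper, location="api/views.py:12",
               request_schema=request_schema)
assert wrapper._request_schema is request_schema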
375,096 | def get(self, request, format=None):
bots = Bot.objects.filter(owner=request.user)
serializer = BotSerializer(bots, many=True)
return Response(serializer.data) | Get list of bots
---
serializer: BotSerializer
responseMessages:
- code: 401
message: Not authenticated |
375,097 | def patch_conf(settings_patch=None, settings_file=None):
if settings_patch is None:
settings_patch = {}
reload_config()
os.environ[ENVIRONMENT_VARIABLE] = settings_file if settings_file else ''
from bernard.conf import settings as l_settings
r_settings = l_settings._settings
r_settings.update(settings_patch)
if 'bernard.i18n' in modules:  # module name reconstructed from the import below
from bernard.i18n import translate, intents
translate._regenerate_word_dict()
intents._refresh_intents_db()
yield | Reload the configuration form scratch. Only the default config is loaded,
not the environment-specified config.
Then the specified patch is applied.
This is for unit tests only!
:param settings_patch: Custom configuration values to insert
:param settings_file: Custom settings file to read |
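A hedged test-usage sketch, assuming patch_conf is wrapped with contextlib.contextmanager upstream; the "DEBUG" key is illustrative:

with patch_conf({"DEBUG": True}):
    from bernard.conf import settings
    assert settings.DEBUG is True  # patched value visible inside the block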
375,098 | def param_title(param_values, slug):
for val in param_values:
if val.param.slug == slug:
return val.param.title
return None | Renders the title of a product parameter.
Usage example::
{% param_title item.paramvalue_set.all "producer" %}
:param param_values: the list of parameter values
:param slug: the parameter's slug (symbolic code)
:return: the parameter title, or None if not found |
375,099 | def cluster_elongate():
"Not so applicable for this sample"
start_centers = [[1.0, 4.5], [3.1, 2.7]]
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_ELONGATE, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)
template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_ELONGATE, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH) | Not so applicable for this sample |