code (string, lengths 51-2.38k) | docstring (string, lengths 4-15.2k) |
---|---|
def v1_stream_id_associations(tags, stream_id):
stream_id = stream_id.decode('utf-8').strip()
return {'associations': tags.assocs_by_stream_id(stream_id)}
|
Retrieve associations for a given stream_id.
The associations returned have the exact same structure as defined
in the ``v1_tag_associate`` route with one addition: a ``tag``
field contains the full tag name for the association.
|
def indexableText(self, tree):
rval = set()
root = tree.getroot()
for txp in self.text_elts_xpaths:
elts = txp(root)
texts = []
for elt in elts:
text = self.text_extract_xpath(elt)
if len(text) > 0:
texts.append(text[0])
texts = self.separator.join(texts)
texts = [toUnicode(x) for x in self.wordssearch_rx.findall(texts)
if len(x) > 0]
rval |= set(texts)
return rval
|
Provides the indexable - search engine oriented - raw text
@param tree: an ElementTree
@return: set(["foo", "bar", ...])
|
def check_domain_request(self, domains):
request = E.checkDomainRequest(
E.domains(
E.array(
*[E.item(
E.name(domain.split(".")[0]),
E.extension(domain.split(".")[1])
) for domain in domains]
)
)
)
response = self.request(request)
return [Model(item) for item in response.data.array.item]
|
Return the availability of one or more domain names.
The availability is a model containing a domain and a status. It can also have a premium
attribute in case the domain has non-default costs.
|
def ghz_circuit(qubits: Qubits) -> Circuit:
circ = Circuit()
circ += H(qubits[0])
for q0 in range(0, len(qubits)-1):
circ += CNOT(qubits[q0], qubits[q0+1])
return circ
|
Returns a circuit that prepares a multi-qubit Bell state from the zero
state.
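A minimal, self-contained sketch of the same construction in plain NumPy (not the library's Circuit/H/CNOT objects), showing that a Hadamard followed by a CNOT chain turns |000> into the 3-qubit GHZ state:
import numpy as np

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
I2 = np.eye(2)
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]], dtype=float)

state = np.zeros(8)
state[0] = 1.0                               # start in |000>
state = np.kron(np.kron(H, I2), I2) @ state  # H on qubit 0
state = np.kron(CNOT, I2) @ state            # CNOT(0, 1)
state = np.kron(I2, CNOT) @ state            # CNOT(1, 2)
print(np.round(state, 3))  # amplitude ~0.707 on |000> and |111>, zero elsewhere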
|
def describe_api_deployments(restApiId, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployments = []
_deployments = conn.get_deployments(restApiId=restApiId)
while True:
if _deployments:
deployments = deployments + _deployments['items']
if 'position' not in _deployments:
break
_deployments = conn.get_deployments(restApiId=restApiId, position=_deployments['position'])
return {'deployments': [_convert_datetime_str(deployment) for deployment in deployments]}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
Gets information about the defined API Deployments. Return list of api deployments.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_deployments restApiId
|
def _pure_data(self, data: Any) -> Any:
if not isinstance(data, dict) and not isinstance(data, list):
try:
return dict(data)
except TypeError:
...
return data
|
If data is a dict-like object, convert it to a pure dict instance so that it
can be passed to the default ``jsonschema.validate`` function.
:param data: Request or response data.
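A standalone sketch of the idea (the class below is a hypothetical stand-in for a dict-like request object, not part of the source library): anything supporting the mapping protocol is coerced with dict(), everything else passes through unchanged.
class HeaderLike:
    # hypothetical dict-like object (e.g. a multidict-style mapping)
    def keys(self):
        return ["a", "b"]
    def __getitem__(self, key):
        return {"a": 1, "b": 2}[key]

def pure_data(data):
    if not isinstance(data, (dict, list)):
        try:
            return dict(data)       # works for any mapping-protocol object
        except TypeError:
            pass
    return data

print(pure_data(HeaderLike()))  # {'a': 1, 'b': 2}
print(pure_data([1, 2, 3]))     # lists pass through unchanged
print(pure_data(42))            # non-convertible values pass through too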
|
def now_time(str=False):
if str:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return datetime.datetime.now()
|
Get the current time.
|
def add_const(features):
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.
if isinstance(features, np.ndarray):
content[:, 1:] = features
return content
content[:, 1:] = features.iloc[:, :].values
cols = ['Constant'] + features.columns.tolist()
return pd.DataFrame(data=content, index=features.index, columns=cols, dtype='float64')
|
Prepend the constant feature 1 as first feature and return the modified
feature set.
Parameters
----------
features : ndarray or DataFrame
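A tiny standalone illustration of what "prepend the constant feature" means for the ndarray case (the underlying idea of adding an intercept column, not the function above):
import numpy as np

features = np.array([[2.0, 3.0],
                     [4.0, 5.0]])
with_const = np.hstack([np.ones((features.shape[0], 1)), features])
print(with_const)
# [[1. 2. 3.]
#  [1. 4. 5.]]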
|
def nvrtcCreateProgram(self, src, name, headers, include_names):
res = c_void_p()
headers_array = (c_char_p * len(headers))()
headers_array[:] = encode_str_list(headers)
include_names_array = (c_char_p * len(include_names))()
include_names_array[:] = encode_str_list(include_names)
code = self._lib.nvrtcCreateProgram(byref(res),
c_char_p(encode_str(src)), c_char_p(encode_str(name)),
len(headers),
headers_array, include_names_array)
self._throw_on_error(code)
return res
|
Creates and returns a new NVRTC program object.
|
def publish(self):
if self.published is False:
self.published = True
else:
raise Warning(self.title + ' is already published.')
|
Mark an episode as published.
|
def patched(f):
def wrapped(*args, **kwargs):
kwargs['return_response'] = False
kwargs['prefetch'] = True
return f(*args, **kwargs)
return wrapped
|
Patches a given API function to not send.
|
def _diffSchema(diskSchema, memorySchema):
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
diff.append('Only on disk:')
diff.extend(map(repr, diskOnly))
if memoryOnly:
diff.append('Only in memory:')
diff.extend(map(repr, memoryOnly))
return '\n'.join(diff)
|
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
|
def get_object(self):
from ..util import AttrDict
c = self.record.unpacked_contents
if not c:
c = yaml.safe_load(self.default)
return AttrDict(c)
|
Return contents in object form, an AttrDict
|
def select_chain(self, chain_urls: Dict[str, List[str]]) -> Tuple[str, List[str]]:
chain_name = self.scenario.chain_name
if chain_name in ('any', 'Any', 'ANY'):
chain_name = random.choice(list(chain_urls.keys()))
log.info('Using chain', chain=chain_name)
try:
return chain_name, chain_urls[chain_name]
except KeyError:
raise ScenarioError(
f'The scenario requested chain "{chain_name}" for which no RPC-URL is known.',
)
|
Select a chain and return its name and RPC URL.
If the currently loaded scenario's designated chain is set to 'any',
we randomly select a chain from the given `chain_urls`.
Otherwise, we will return `ScenarioRunner.scenario.chain_name` and whatever value
may be associated with this key in `chain_urls`.
:raises ScenarioError:
if ScenarioRunner.scenario.chain_name is not one of `('any', 'Any', 'ANY')`
and it is not a key in `chain_urls`.
|
def generate_unique_name(name_prefix, reservation_id=None):
if reservation_id and isinstance(reservation_id, str) and len(reservation_id) >= 4:
unique_id = str(uuid.uuid4())[:4] + "-" + reservation_id[-4:]
else:
unique_id = str(uuid.uuid4())[:8]
return name_prefix + "_" + unique_id
|
Generate a unique name.
The method generates a GUID and appends the first 8 characters of the new GUID to 'name_prefix'.
If a reservation id is provided, the first 4 characters of the generated GUID are combined with the
last 4 characters of the reservation id.
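An illustrative standalone sketch of the naming scheme described above (the prefix and reservation id below are made up for the example):
import uuid

name_prefix = "vm"
reservation_id = "a1b2c3d4-5678"   # hypothetical reservation id
guid = str(uuid.uuid4())
print(name_prefix + "_" + guid[:8])                              # e.g. vm_3f9c2a1d
print(name_prefix + "_" + guid[:4] + "-" + reservation_id[-4:])  # e.g. vm_3f9c-5678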
|
def inject(self, solutions):
if not hasattr(self, 'pop_injection_directions'):
self.pop_injection_directions = []
for solution in solutions:
if len(solution) != self.N:
raise ValueError('method `inject` needs a list or array'
+ (' each el with dimension (`len`) %d' % self.N))
self.pop_injection_directions.append(
array(solution, copy=False, dtype=float) - self.mean)
|
inject a genotypic solution. The solution is used as direction
relative to the distribution mean to compute a new candidate
solution returned in method `ask_geno` which in turn is used in
method `ask`.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 2)
>>> while not es.stop():
... es.inject([4 * [0.0]])
... X = es.ask()
... break
>>> assert X[0][0] == X[0][1]
|
def _set_closest_stroke_width(self, width):
width *= self.pixel_to_size_ratio() / 6.
stroke_width_range = glGetFloatv(GL_LINE_WIDTH_RANGE)
stroke_width_granularity = glGetFloatv(GL_LINE_WIDTH_GRANULARITY)
if width < stroke_width_range[0]:
glLineWidth(stroke_width_range[0])
return
if width > stroke_width_range[1]:
glLineWidth(stroke_width_range[1])
return
glLineWidth(round(width / stroke_width_granularity) * stroke_width_granularity)
|
Sets the line width to the closest supported one
Not all line widths are supported. This function queries both minimum and maximum as well as the step size of
the line width and calculates the width, which is closest to the given one. This width is then set.
:param width: The desired line width
|
def get(ctx, uri):
http_client = get_wva(ctx).get_http_client()
cli_pprint(http_client.get(uri))
|
Perform an HTTP GET of the provided URI
The URI provided is relative to the /ws base to allow for easy navigation of
the resources exposed by the WVA. Example Usage::
\b
$ wva get /
{'ws': ['vehicle',
'hw',
'config',
'state',
'files',
'alarms',
'subscriptions',
'password']}
$ wva get /vehicle
{'vehicle': ['vehicle/ecus', 'vehicle/data', 'vehicle/dtc']}
$ wva get /vehicle/ecus
{'ecus': ['vehicle/ecus/can0ecu0', 'vehicle/ecus/can0ecu251']}
$ wva get /vehicle/ecus/can0ecu0
{'can0ecu0': ['vehicle/ecus/can0ecu0/name',
'vehicle/ecus/can0ecu0/address',
'vehicle/ecus/can0ecu0/function',
'vehicle/ecus/can0ecu0/bus',
'vehicle/ecus/can0ecu0/channel',
'vehicle/ecus/can0ecu0/make',
'vehicle/ecus/can0ecu0/model',
'vehicle/ecus/can0ecu0/serial_number',
'vehicle/ecus/can0ecu0/unit_number',
'vehicle/ecus/can0ecu0/VIN']}
$ wva get /vehicle/ecus/can0ecu0/bus
{'bus': 'J1939'}
|
def add(self, *destinations):
buffered_messages = None
if not self._any_added:
self._any_added = True
buffered_messages = self._destinations[0].messages
self._destinations = []
self._destinations.extend(destinations)
if buffered_messages:
for message in buffered_messages:
self.send(message)
|
Adds new destinations.
A destination should never ever throw an exception. Seriously.
A destination should not mutate the dictionary it is given.
@param destinations: A list of callables that takes message
dictionaries.
|
def fit_transform(self, X, y=None):
X_original, missing_mask = self.prepare_input_data(X)
observed_mask = ~missing_mask
X = X_original.copy()
if self.normalizer is not None:
X = self.normalizer.fit_transform(X)
X_filled = self.fill(X, missing_mask, inplace=True)
if not isinstance(X_filled, np.ndarray):
raise TypeError(
"Expected %s.fill() to return NumPy array but got %s" % (
self.__class__.__name__,
type(X_filled)))
X_result = self.solve(X_filled, missing_mask)
if not isinstance(X_result, np.ndarray):
raise TypeError(
"Expected %s.solve() to return NumPy array but got %s" % (
self.__class__.__name__,
type(X_result)))
X_result = self.project_result(X=X_result)
X_result[observed_mask] = X_original[observed_mask]
return X_result
|
Fit the imputer and then transform input `X`
Note: all imputations should have a `fit_transform` method,
but only some (like IterativeImputer) also support inductive mode
using `fit` or `fit_transform` on `X_train` and then `transform`
on new `X_test`.
|
def get_index_from_alias(alias_name, index_client=None):
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0]
|
Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index
|
def get_syslog(self, service_id, version_number, name):
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name))
return FastlySyslog(self, content)
|
Get the Syslog for a particular service and version.
|
def get_subscription_by_channel_id_and_endpoint_id(
self, channel_id, endpoint_id):
subscriptions = self.search_subscriptions(
channel_id=channel_id, endpoint_id=endpoint_id)
try:
return subscriptions[0]
except IndexError:
raise DataFailureException("subscription search (channel_id: %s, endpoint_id: %s)" % (channel_id, endpoint_id), 404, "No subscription found")
|
Search for subscription by a given channel and endpoint
|
def request(self, action, data={}, headers={}, method='GET'):
data = self.merge(data, {'user': self.username, 'password': self.password, 'api_id': self.apiId})
return Transport.request(self, action, data, headers, method)
|
Append the user authentication details to every incoming request
|
def cmd_whois_domain(domain):
warnings.filterwarnings("ignore")
data = whois.whois(domain)
data = remove_duplicates(data)
print(json.dumps(data, indent=4, default=str))
|
Simple whois client to check domain names.
Example:
\b
$ habu.whois.domain portantier.com
{
"domain_name": "portantier.com",
"registrar": "Amazon Registrar, Inc.",
"whois_server": "whois.registrar.amazon.com",
...
|
def start(config, args):
global mode
if core.is_standalone():
from glances.standalone import GlancesStandalone as GlancesMode
elif core.is_client():
if core.is_client_browser():
from glances.client_browser import GlancesClientBrowser as GlancesMode
else:
from glances.client import GlancesClient as GlancesMode
elif core.is_server():
from glances.server import GlancesServer as GlancesMode
elif core.is_webserver():
from glances.webserver import GlancesWebServer as GlancesMode
logger.info("Start {} mode".format(GlancesMode.__name__))
mode = GlancesMode(config=config, args=args)
mode.serve_forever()
mode.end()
|
Start Glances.
|
def serialize_close(code: int, reason: str) -> bytes:
check_close(code)
return struct.pack("!H", code) + reason.encode("utf-8")
|
Serialize the data for a close frame.
This is the reverse of :func:`parse_close`.
|
def register_incoming_conn(self, conn):
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change()
|
Add incoming connection into the heap.
|
def get_key(self, command, args):
spec = COMMANDS.get(command.upper())
if spec is None:
raise UnroutableCommand('The command "%r" is unknown to the '
'router and cannot be handled as a '
'result.' % command)
if 'movablekeys' in spec['flags']:
raise UnroutableCommand('The keys for "%r" are movable and '
'as such cannot be routed to a single '
'host.' % command)
keys = extract_keys(args, spec['key_spec'])
if len(keys) == 1:
return keys[0]
elif not keys:
raise UnroutableCommand(
'The command "%r" does not operate on a key which means '
'that no suitable host could be determined. Consider '
'using a fanout instead.' % command)
raise UnroutableCommand(
'The command "%r" operates on multiple keys (%d passed) which is '
'something that is not supported.' % (command, len(keys)))
|
Returns the key a command operates on.
|
def while_not_sync_standby(self, func):
if not self.is_synchronous_mode() or self.patroni.nosync:
return func()
with self._member_state_lock:
self._disable_sync += 1
try:
if self.touch_member():
for _ in polling_loop(timeout=self.dcs.loop_wait*2, interval=2):
try:
if not self.is_sync_standby(self.dcs.get_cluster()):
break
except DCSError:
logger.warning("Could not get cluster state, skipping synchronous standby disable")
break
logger.info("Waiting for master to release us from synchronous standby")
else:
logger.warning("Updating member state failed, skipping synchronous standby disable")
return func()
finally:
with self._member_state_lock:
self._disable_sync -= 1
|
Runs specified action while trying to make sure that the node is not assigned synchronous standby status.
Tags us as not allowed to be a sync standby because we are going to go away. If we currently are the
sync standby, we wait for the leader to notice and pick an alternative; if the leader changes or goes
away we are also free.
If the connection to DCS fails we run the action anyway, as this is only a hint.
There is a small race window where this function runs between the master picking us as the sync standby
and publishing it to the DCS. As the window is rather tiny and the consequence is merely holding up
commits for one cycle period, we don't worry about it here.
|
def get_client(self, request=None):
if not isinstance(request, oauth.Request):
request = self.get_oauth_request()
client_key = request.get_parameter('oauth_consumer_key')
if not client_key:
raise Exception('Missing "oauth_consumer_key" parameter in ' \
'OAuth "Authorization" header')
client = models.Client.get_by_key_name(client_key)
if not client:
raise Exception('Client "%s" not found.' % client_key)
return client
|
Return the client from the OAuth parameters.
|
def get_permissions(self, grp_name, resource):
self.project_service.set_auth(self._token_project)
return self.project_service.get_permissions(grp_name, resource)
|
Get the permissions the group has for the given resource.
Args:
grp_name (string): Name of group.
resource (intern.resource.boss.Resource): Identifies which data
model object to operate on.
Returns:
(list): List of permissions.
Raises:
requests.HTTPError on failure.
|
def refresh(self):
if self.exists:
self.delete()
self.populate()
self.open()
|
Refresh the cache by deleting the old one and creating a new one.
|
def group_dict(self, group: str) -> Dict[str, Any]:
return dict(
(opt.name, opt.value())
for name, opt in self._options.items()
if not group or group == opt.group_name
)
|
The names and values of options in a group.
Useful for copying options into Application settings::
from tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1
|
def get_mod(cls):
if isinstance(cls, (type, types.FunctionType)):
ret = cls.__module__
else:
ret = cls.__class__.__module__
return ret
|
Returns the string identifying the module that cls is defined in.
|
def get_bug_log(nr):
reply = _soap_client_call('get_bug_log', nr)
items_el = reply('soapenc:Array')
buglogs = []
for buglog_el in items_el.children():
buglog = {}
buglog["header"] = _parse_string_el(buglog_el("header"))
buglog["body"] = _parse_string_el(buglog_el("body"))
buglog["msg_num"] = int(buglog_el("msg_num"))
buglog["attachments"] = []
mail_parser = email.feedparser.FeedParser()
mail_parser.feed(buglog["header"])
mail_parser.feed("\n\n")
mail_parser.feed(buglog["body"])
buglog["message"] = mail_parser.close()
buglogs.append(buglog)
return buglogs
|
Get Buglogs.
A buglog is a dictionary with the following mappings:
* "header" => string
* "body" => string
* "attachments" => list
* "msg_num" => int
* "message" => email.message.Message
Parameters
----------
nr : int
the bugnumber
Returns
-------
buglogs : list of dicts
|
def Add(self, artifact=None, target=None, callback=None):
if target is None:
target = Target()
os_name = target.Get("os") or [None]
cpe = target.Get("cpe") or [None]
label = target.Get("label") or [None]
attributes = itertools.product(os_name, cpe, label)
new_conditions = [Condition(artifact, *attr) for attr in attributes]
self.conditions.update(new_conditions)
self._Register(new_conditions, callback)
|
Add criteria for a check.
Args:
artifact: An artifact name.
target: A tuple of artifact necessary to process the data.
callback: Entities that should be called if the condition matches.
|
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name):
org = IvyUtils.INTERNAL_ORG_NAME
name = resolve_hash_name
extra_configurations = [conf for conf in confs if conf and conf != 'default']
jars_by_key = OrderedDict()
for jar in jars:
jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar)
dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()]
template_data = TemplateData(org=org,
module=name,
extra_configurations=extra_configurations,
dependencies=dependencies)
template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache')
cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
|
Generates an ivy xml with all jars marked as intransitive using the all conflict manager.
|
def invalidate_m2m_cache(sender, instance, model, **kwargs):
logger.debug('Received m2m_changed signals from sender {0}'.format(sender))
update_model_cache(instance._meta.db_table)
update_model_cache(model._meta.db_table)
|
Signal receiver for models to invalidate model cache for many-to-many relationship.
Parameters
~~~~~~~~~~
sender
The model class
instance
The instance whose many-to-many relation is updated.
model
The class of the objects that are added to, removed from or cleared from the relation.
|
def load(self, rel_path=None):
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package'))
filename = v.get('filename')
path = v.get('path')
if filename:
if not path:
path = rel_path
else:
path = os.path.join(rel_path, path)
if isinstance(filename, basestring):
filename = os.path.join(path, filename)
else:
file_list = [os.path.join(path, f) for f in filename]
filename = os.path.pathsep.join(file_list)
self.open(k, filename)
|
Add data_sources to layer and open files with data for the data_source.
|
def get_form_success_data(self, form):
data = {
"html": render_to_string(
"pinax/teams/_invite_form.html",
{
"invite_form": self.get_unbound_form(),
"team": self.team
},
request=self.request
)
}
membership = self.membership
if membership is not None:
if membership.state == Membership.STATE_APPLIED:
fragment_class = ".applicants"
elif membership.state == Membership.STATE_INVITED:
fragment_class = ".invitees"
elif membership.state in (Membership.STATE_AUTO_JOINED, Membership.STATE_ACCEPTED):
fragment_class = {
Membership.ROLE_OWNER: ".owners",
Membership.ROLE_MANAGER: ".managers",
Membership.ROLE_MEMBER: ".members"
}[membership.role]
data.update({
"append-fragments": {
fragment_class: render_to_string(
"pinax/teams/_membership.html",
{
"membership": membership,
"team": self.team
},
request=self.request
)
}
})
return data
|
Allows customization of the JSON data returned when a valid form submission occurs.
|
def apparent_dip_correction(axes):
a1 = axes[0].copy()
a1[-1] = 0
cosa = angle(axes[0],a1,cos=True)
_ = 1-cosa**2
if _ > 1e-12:
sina = N.sqrt(_)
if cosa < 0:
sina *= -1
R= N.array([[cosa,sina],[-sina,cosa]])
else:
R = N.identity(2)
return R
|
Produces a two-dimensional rotation matrix that
rotates a projected dataset to correct for apparent dip
|
def _rollout_metadata(batch_env):
batch_env_shape = batch_env.observ.get_shape().as_list()
batch_size = [batch_env_shape[0]]
shapes_types_names = [
(batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"),
(batch_size, tf.float32, "reward"),
(batch_size, tf.bool, "done"),
(batch_size + list(batch_env.action_shape), batch_env.action_dtype,
"action"),
(batch_size, tf.float32, "pdf"),
(batch_size, tf.float32, "value_function"),
]
return shapes_types_names
|
Metadata for rollouts.
|
def _lookup_first(dictionary, key):
value = dictionary[key]
if type(value) == list:
return value[0]
else:
return value
|
Look up the first value for a key. If the value is a list, return its first
element; otherwise return the value itself.
:param dict dictionary: The dictionary to search
:param str key: The key to get
:return: Returns the first value available for the key
:rtype: str
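A quick doctest-style illustration of the behaviour (assuming the function above is in scope):
>>> _lookup_first({"single": "a", "many": ["b", "c"]}, "single")
'a'
>>> _lookup_first({"single": "a", "many": ["b", "c"]}, "many")
'b'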
|
def save(self, update_site=False, *args, **kwargs):
if update_site or (self.id is None and self.site_id is None):
self.site_id = current_site_id()
super(SiteRelated, self).save(*args, **kwargs)
|
Set the site to the current site when the record is first
created, or the ``update_site`` argument is explicitly set
to ``True``.
|
def set_bar(self, bar, value):
if bar < 0 or bar > 23:
return
c = (bar if bar < 12 else bar - 12) // 4
a = bar % 4
if bar >= 12:
a += 4
self.set_led(c*16+a+8, 1 if value & GREEN > 0 else 0)
self.set_led(c*16+a, 1 if value & RED > 0 else 0)
|
Set bar to desired color. Bar should be a value of 0 to 23, and value
should be OFF, GREEN, RED, or YELLOW.
|
def histogram(values, bins=10, vrange=None, title="", c="g", corner=1, lines=True):
fs, edges = np.histogram(values, bins=bins, range=vrange)
pts = []
for i in range(len(fs)):
pts.append([(edges[i] + edges[i + 1]) / 2, fs[i]])
return xyplot(pts, title, c, corner, lines)
|
Build a 2D histogram from a list of values in n bins.
Use *vrange* to restrict the range of the histogram.
Use *corner* to assign its position:
- 1, topleft,
- 2, topright,
- 3, bottomleft,
- 4, bottomright.
.. hint:: Example: |fitplanes.py|_
|
def fir_from_transfer(transfer, ntaps, window='hanning', ncorner=None):
transfer = truncate_transfer(transfer, ncorner=ncorner)
impulse = npfft.irfft(transfer)
impulse = truncate_impulse(impulse, ntaps=ntaps, window=window)
out = numpy.roll(impulse, int(ntaps/2 - 1))[0:ntaps]
return out
|
Design a Type II FIR filter given an arbitrary transfer function
Parameters
----------
transfer : `numpy.ndarray`
transfer function to start from, must have at least ten samples
ntaps : `int`
number of taps in the final filter, must be an even number
window : `str`, `numpy.ndarray`, optional
window function to truncate with, default: ``'hanning'``
see :func:`scipy.signal.get_window` for details on acceptable formats
ncorner : `int`, optional
number of extra samples to zero off at low frequency, default: `None`
Returns
-------
out : `numpy.ndarray`
A time domain FIR filter of length `ntaps`
Notes
-----
The final FIR filter will use `~numpy.fft.rfft` FFT normalisation.
If `ncorner` is not `None`, then `ncorner` extra samples will be zeroed
on the left as a hard highpass filter.
See Also
--------
scipy.signal.remez
an alternative FIR filter design using the Remez exchange algorithm
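A rough standalone sketch of the same recipe using only NumPy/SciPy (an illustrative re-derivation, not the library implementation; it skips the transfer/impulse truncation helpers): inverse-FFT the one-sided transfer function, keep ``ntaps`` samples centred on zero lag, and taper them with a window.
import numpy as np
from scipy.signal import get_window

def sketch_fir_from_transfer(transfer, ntaps, window="hann"):
    impulse = np.fft.irfft(transfer)
    half = ntaps // 2
    # samples around zero lag: the last `half` samples wrap to the front
    taps = np.concatenate([impulse[-half:], impulse[:half]])
    return taps * get_window(window, ntaps)

freqs = np.linspace(0, 1, 129)
transfer = 1.0 / (1.0 + (freqs / 0.2) ** 4)          # toy low-pass transfer function
print(sketch_fir_from_transfer(transfer, 64).shape)  # (64,)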
|
def address_target_pairs_from_address_families(self, address_families):
single_af = assert_single_element(address_families)
addr_tgt_pairs = [
(addr, tgt) for addr, tgt in single_af.addressables.items()
if addr.target_name == self.name
]
if len(addr_tgt_pairs) == 0:
raise self._SingleAddressResolutionError(single_af, self.name)
assert(len(addr_tgt_pairs) == 1)
return addr_tgt_pairs
|
Return the pair for the single target matching the single AddressFamily, or error.
:raises: :class:`SingleAddress._SingleAddressResolutionError` if no targets could be found for a
:class:`SingleAddress` instance.
:return: list of (Address, Target) pairs with exactly one element.
|
def _create_function(name, doc=""):
def _(col):
spark_ctx = SparkContext._active_spark_context
java_ctx = (getattr(spark_ctx._jvm.com.sparklingpandas.functions,
name)
(col._java_ctx if isinstance(col, Column) else col))
return Column(java_ctx)
_.__name__ = name
_.__doc__ = doc
return _
|
Create a function for aggregator by name
|
def _ReadSupportedOS(self, definition_values, definition_object, name):
supported_os = definition_values.get('supported_os', [])
if not isinstance(supported_os, list):
raise errors.FormatError(
'Invalid supported_os type: {0!s}'.format(type(supported_os)))
undefined_supported_os = set(supported_os).difference(self.supported_os)
if undefined_supported_os:
error_string = (
'Artifact definition: {0:s} undefined supported operating system: '
'{1:s}.').format(name, ', '.join(undefined_supported_os))
raise errors.FormatError(error_string)
definition_object.supported_os = supported_os
|
Reads the optional artifact or source type supported OS.
Args:
definition_values (dict[str, object]): artifact definition values.
definition_object (ArtifactDefinition|SourceType): the definition object.
name (str): name of the artifact definition.
Raises:
FormatError: if there are undefined supported operating systems.
|
def _build_receipt_table(result, billing="hourly", test=False):
title = "OrderId: %s" % (result.get('orderId', 'No order placed'))
table = formatting.Table(['Cost', 'Description'], title=title)
table.align['Cost'] = 'r'
table.align['Description'] = 'l'
total = 0.000
if test:
prices = result['prices']
else:
prices = result['orderDetails']['prices']
for item in prices:
rate = 0.000
if billing == "hourly":
rate += float(item.get('hourlyRecurringFee', 0.000))
else:
rate += float(item.get('recurringFee', 0.000))
total += rate
table.add_row([rate, item['item']['description']])
table.add_row(["%.3f" % total, "Total %s cost" % billing])
return table
|
Build a receipt table of the item prices and the total recurring fee.
|
def get_dependency_metadata():
link = os.path.join(_api_url(), 'meta.txt')
return _process_req_txt(requests.get(link)).split('\n')
|
Returns list of strings with dependency metadata from Dapi
|
def remove_child_repositories(self, repository_id):
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=repository_id)
return self._hierarchy_session.remove_children(id_=repository_id)
|
Removes all children from a repository.
arg: repository_id (osid.id.Id): the ``Id`` of a repository
raise: NotFound - ``repository_id`` not in hierarchy
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def agent_path(cls, project, agent):
return google.api_core.path_template.expand(
'projects/{project}/agents/{agent}',
project=project,
agent=agent,
)
|
Return a fully-qualified agent string.
|
def generate_distance_matrix(source, target, weights=None):
if weights is None:
weights = ones((source.shape[1], 1))
sLen = source.shape[0]
tLen = target.shape[0]
distMat = zeros((sLen, tLen))
for i in range(sLen):
for j in range(tLen):
distMat[i, j] = euclidean(source[i, :], target[j, :])
return distMat
|
Generates a local distance matrix for use in dynamic time warping.
Parameters
----------
source : 2D array
Source matrix with features in the second dimension.
target : 2D array
Target matrix with features in the second dimension.
Returns
-------
2D array
Local distance matrix.
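As a standalone aside (not part of the function above), the same local distance matrix can be computed without the double loop via scipy's cdist, which is usually much faster for DTW-sized inputs:
import numpy as np
from scipy.spatial.distance import cdist

source = np.random.rand(5, 3)   # 5 frames, 3 features each
target = np.random.rand(7, 3)   # 7 frames, 3 features each
dist_mat = cdist(source, target, metric="euclidean")
print(dist_mat.shape)           # (5, 7)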
|
def removeAssociation(self, server_url, handle):
assoc = self.getAssociation(server_url, handle)
if assoc is None:
return 0
else:
filename = self.getAssociationFilename(server_url, handle)
return _removeIfPresent(filename)
|
Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool
|
def mobile(self):
if self._mobile is None:
self._mobile = MobileList(self._version, account_sid=self._solution['account_sid'], )
return self._mobile
|
Access the mobile
:returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList
:rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList
|
def prepare_normal_vectors(atomselection):
ring_atomselection = [atomselection.coordinates()[a] for a in [0,2,4]]
vect1 = self.vector(ring_atomselection[0],ring_atomselection[1])
vect2 = self.vector(ring_atomselection[2],ring_atomselection[0])
return self.normalize_vector(np.cross(vect1,vect2))
|
Create and normalize a vector across ring plane.
|
def set_deadline(self, end):
if self.get_deadline_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_timestamp(
end,
self.get_deadline_metadata()):
raise errors.InvalidArgument()
self._my_map['deadline'] = end
|
Sets the assessment end time.
arg: end (timestamp): assessment end time
raise: InvalidArgument - ``end`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
|
def to_representation(self, instance):
if not isinstance(instance, dict):
data = super(
DynamicEphemeralSerializer,
self
).to_representation(instance)
else:
data = instance
instance = EphemeralObject(data)
if self.id_only():
return data
else:
return tag_dict(data, serializer=self, instance=instance)
|
Provides post processing. Sub-classes should implement their own
to_representation method, but pass the resulting dict through
this function to get tagging and field selection.
Arguments:
instance: Serialized dict, or object. If object,
it will be serialized by the super class's
to_representation() method.
|
def get_catalog(self, locale):
with translation.override(locale):
translation_engine = DjangoTranslation(locale, domain=self.domain, localedirs=self.paths)
trans_cat = translation_engine._catalog
trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {}
return trans_cat, trans_fallback_cat
|
Create Django translation catalogue for `locale`.
|
def checkAndCreate(self, key, payload, domainId):
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
subnetDomainIds = []
for domain in self[key]['domains']:
subnetDomainIds.append(domain['id'])
if domainId not in subnetDomainIds:
subnetDomainIds.append(domainId)
self[key]["domain_ids"] = subnetDomainIds
if len(self[key]["domains"]) is not len(subnetDomainIds):
return False
return oid
|
Function checkAndCreate
Check if a subnet exists and create it if not
@param key: The targeted subnet
@param payload: The targeted subnet description
@param domainId: The domainId to be attached with the subnet
@return RETURN: The id of the subnet
|
def poly_energies(samples_like, poly):
msg = ("poly_energies is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energies")
warnings.warn(msg, DeprecationWarning)
return BinaryPolynomial(poly, 'SPIN').energies(samples_like)
|
Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s).
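A hedged usage sketch (assumes a dimod version where ``BinaryPolynomial.energies`` is available, as the deprecation message suggests; the polynomial and sample below are made up):
import dimod

poly = {('a',): -1.0, ('a', 'b'): 1.0, ('a', 'b', 'c'): 0.5}
sample = {'a': 1, 'b': -1, 'c': 1}
# same call path the deprecated wrapper uses internally
print(dimod.BinaryPolynomial(poly, 'SPIN').energies([sample]))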
|
def _hardware_count(self):
return self._counts.get("hardware") + self._counts.get("serial") + self._counts.get("mbed")
|
Amount of hardware resources.
:return: integer
|
def get_log_entries_by_ids(self, log_entry_ids):
collection = JSONClientValidated('logging',
collection='LogEntry',
runtime=self._runtime)
object_id_list = []
for i in log_entry_ids:
object_id_list.append(ObjectId(self._get_id(i, 'logging').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.LogEntryList(sorted_result, runtime=self._runtime, proxy=self._proxy)
|
Gets a ``LogEntryList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the entries
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible logentries may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: log_entry_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.logging.LogEntryList) - the returned ``LogEntry
list``
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``log_entry_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def _get_snmpv3(self, oid):
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(
self.user,
self.auth_key,
self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto,
),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid,
lookupNames=True,
lookupValues=True,
)
if not error_detected and snmp_data[0][1]:
return text_type(snmp_data[0][1])
return ""
|
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
|
def _check_valid_data(self, data):
if len(data.shape) == 2 and data.shape[1] != 1:
raise ValueError('Can only initialize Direction from a single Nx1 array')
if np.abs(np.linalg.norm(data) - 1.0) > 1e-4:
raise ValueError('Direction data must have norm=1.0')
|
Checks that the incoming data is a Nx1 ndarray.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or if the vector is not
normed.
|
def countbit(self, name, start=None, size=None):
if start is not None and size is not None:
start = get_integer('start', start)
size = get_integer('size', size)
return self.execute_command('countbit', name, start, size)
elif start is not None:
start = get_integer('start', start)
return self.execute_command('countbit', name, start)
return self.execute_command('countbit', name)
|
Returns the count of set bits in the value of ``key``. Optional
``start`` and ``size`` parameters indicate which bytes to consider.
Similar to **Redis.BITCOUNT**
:param string name: the key name
:param int start: Optional, if start is negative, count from start'th
character from the end of string.
:param int size: Optional, if size is negative, then that many
characters will be omitted from the end of string.
:return: the count of the bit 1
:rtype: int
>>> ssdb.set('bit_test', 1)
True
>>> ssdb.countbit('bit_test')
3
>>> ssdb.set('bit_test','1234567890')
True
>>> ssdb.countbit('bit_test', 0, 1)
3
>>> ssdb.countbit('bit_test', 3, -3)
16
|
def _build_one_legacy(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Building wheel for %s (setup.py)' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
output = call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
except Exception:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return None
names = os.listdir(tempd)
wheel_path = get_legacy_build_wheel_path(
names=names,
temp_dir=tempd,
req=req,
command_args=wheel_args,
command_output=output,
)
return wheel_path
|
Build one InstallRequirement using the "legacy" build process.
Returns path to wheel if successfully built. Otherwise, returns None.
|
def warp_by_scalar(dataset, scalars=None, scale_factor=1.0, normal=None,
inplace=False):
if scalars is None:
field, scalars = dataset.active_scalar_info
arr, field = get_scalar(dataset, scalars, preference='point', info=True)
if field != vtki.POINT_DATA_FIELD:
raise AssertionError('Dataset can only be warped by a point data array.')
alg = vtk.vtkWarpScalar()
alg.SetInputDataObject(dataset)
alg.SetInputArrayToProcess(0, 0, 0, field, scalars)
alg.SetScaleFactor(scale_factor)
if normal is not None:
alg.SetNormal(normal)
alg.SetUseNormal(True)
alg.Update()
output = _get_output(alg)
if inplace:
dataset.points = output.points
return
return output
|
Warp the dataset's points by a point data scalar array's values.
This modifies point coordinates by moving points along point normals by
the scalar amount times the scale factor.
Parameters
----------
scalars : str, optional
Name of scalars to warp by. Defaults to currently active scalars.
scale_factor : float, optional
A scaling factor to increase the scaling effect
normal : np.array, list, tuple of length 3
User specified normal. If given, data normals will be ignored and
the given normal will be used to project the warp.
inplace : bool
If True, the points of the given dataset will be updated.
|
def _get_computer_object():
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
|
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
|
def tee_lookahead(t, i):
for value in islice(t.__copy__(), i, None):
return value
raise IndexError(i)
|
Inspect the i-th upcoming value from a tee object
while leaving the tee object at its current position.
Raise an IndexError if the underlying iterator doesn't
have enough values.
|
def _sample_rows(self, X, Y, sample_shape, seed):
if sample_shape[0] is None or X.shape[0] <= sample_shape[0]:
X_sample, Y_sample = X, Y
elif Y is None:
np.random.seed(seed)
row_indices = np.random.choice(
X.shape[0], size=sample_shape[0], replace=False
)
X_sample, Y_sample = X.iloc[row_indices], Y
else:
drop_size = X.shape[0] - sample_shape[0]
sample_size = sample_shape[0]
sss = StratifiedShuffleSplit(
n_splits=2, test_size=drop_size, train_size=sample_size, random_state=seed
)
row_indices, _ = next(sss.split(X, Y))
X_sample, Y_sample = X.iloc[row_indices], Y.iloc[row_indices]
return (X_sample, Y_sample)
|
Stratified uniform sampling of rows, according to the classes in Y.
Ensures there are enough samples from each class in Y for cross
validation.
|
def popitem (self):
if self._keys:
k = self._keys[0]
v = self[k]
del self[k]
return (k, v)
raise KeyError("popitem() on empty dictionary")
|
Remove oldest key from dict and return item.
|
def _decode_length(self, offset, sizeof_char):
sizeof_2chars = sizeof_char << 1
fmt = "<2{}".format('B' if sizeof_char == 1 else 'H')
highbit = 0x80 << (8 * (sizeof_char - 1))
length1, length2 = unpack(fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
if (length1 & highbit) != 0:
length = ((length1 & ~highbit) << (8 * sizeof_char)) | length2
size = sizeof_2chars
else:
length = length1
size = sizeof_char
if sizeof_char == 1:
assert length <= 0x7FFF, "length of UTF-8 string is too large! At offset={}".format(offset)
else:
assert length <= 0x7FFFFFFF, "length of UTF-16 string is too large! At offset={}".format(offset)
return length, size
|
Generic Length Decoding at offset of string
The method works for both 8 and 16 bit Strings.
Length checks are enforced:
* 8 bit strings: maximum of 0x7FFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#692)
* 16 bit strings: maximum of 0x7FFFFFFF bytes (See
http://androidxref.com/9.0.0_r3/xref/frameworks/base/libs/androidfw/ResourceTypes.cpp#670)
:param offset: offset into the string data section of the beginning of
the string
:param sizeof_char: number of bytes per char (1 = 8bit, 2 = 16bit)
:returns: tuple of (length, read bytes)
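A worked standalone example of the 8-bit case described above (illustrative only): a length of 0x123 does not fit in 7 bits, so it is stored as two bytes with the high bit of the first byte set.
from struct import unpack

buf = bytes([0x81, 0x23])          # 0x81 has the high bit set -> two-byte length
length1, length2 = unpack("<2B", buf)
highbit = 0x80
if length1 & highbit:
    length = ((length1 & ~highbit) << 8) | length2
    size = 2
else:
    length, size = length1, 1
print(length, size)                 # 291 2   (0x123 == 291)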
|
def get_partition_dciId(self, org_name, part_name, part_info=None):
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info("query result from dcnm for partition info is %s",
part_info)
if part_info is not None and "dciId" in part_info:
return part_info.get("dciId")
|
get DCI ID for the partition.
:param org_name: name of organization
:param part_name: name of partition
|
def get_open_fds():
pid = os.getpid()
procs = subprocess.check_output(["lsof", '-w', '-Ff', "-p", str(pid)])
procs = procs.decode("utf-8")
return len([s for s in procs.split('\n')
if s and s[0] == 'f' and s[1:].isdigit()])
|
Return the number of open file descriptors for current process
.. warning:: will only work on UNIX-like OS-es.
|
def get_dimension_by_unit_measure_or_abbreviation(measure_or_unit_abbreviation,**kwargs):
unit_abbreviation, factor = _parse_unit(measure_or_unit_abbreviation)
units = db.DBSession.query(Unit).filter(Unit.abbreviation==unit_abbreviation).all()
if len(units) == 0:
raise HydraError('Unit %s not found.'%(unit_abbreviation))
elif len(units) > 1:
raise HydraError('Unit %s matches more than one dimension.'%(unit_abbreviation))
else:
dimension = db.DBSession.query(Dimension).filter(Dimension.id==units[0].dimension_id).one()
return str(dimension.name)
|
Return the physical dimension that a given unit abbreviation (or the full measure itself) refers to.
The search key is the abbreviation or the full measure.
|
def _makeTimingRelative(absoluteDataList):
timingSeq = [row[0] for row in absoluteDataList]
valueSeq = [list(row[1:]) for row in absoluteDataList]
relTimingSeq, startTime, endTime = makeSequenceRelative(timingSeq)
relDataList = [tuple([time, ] + row) for time, row
in zip(relTimingSeq, valueSeq)]
return relDataList, startTime, endTime
|
Given normal pitch tier data, puts the times on a scale from 0 to 1
Input is a list of tuples of the form
[(time1, pitch1), (time2, pitch2), ...]
Also returns the start and end time so that the process can be reversed
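A standalone sketch of the underlying normalisation (not the library helpers): map the time axis of (time, value) pairs onto [0, 1] and keep the original start/end so the transform can later be reversed.
data = [(1.0, 100), (2.0, 110), (3.0, 90)]
times = [t for t, _ in data]
start, end = min(times), max(times)
rel = [((t - start) / (end - start), v) for t, v in data]
print(rel)          # [(0.0, 100), (0.5, 110), (1.0, 90)]
print(start, end)   # 1.0 3.0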
|
def create_response(self, data=None):
frame = deepcopy(self)
if data is not None:
frame.data = data
frame.length = 2 + len(frame.data)
return frame
|
Create a response frame based on this frame.
:param data: Data section of response as bytearray. If None, request data section is kept.
:return: ModbusTCPFrame instance that represents a response
|
def size(self):
return (0 if self.shape == () else
int(np.prod(self.shape, dtype='int64')))
|
Total number of grid points.
|
def read_egginfo_json(pkg_name, filename=DEFAULT_JSON, working_set=None):
working_set = working_set or default_working_set
dist = find_pkg_dist(pkg_name, working_set=working_set)
return read_dist_egginfo_json(dist, filename)
|
Read json from egginfo of a package identified by `pkg_name` that's
already installed within the current Python environment.
|
def begin_abort(self, root_pipeline_key, abort_message):
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Rollback()
if pipeline_record.status == _PipelineRecord.ABORTED:
logging.warning(
'Tried to abort root pipeline ID "%s"; already in state: %s',
root_pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
if pipeline_record.abort_requested:
logging.warning(
'Tried to abort root pipeline ID "%s"; abort signal already sent.',
root_pipeline_key.name())
raise db.Rollback()
pipeline_record.abort_requested = True
pipeline_record.abort_message = abort_message
pipeline_record.put()
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
return True
return db.run_in_transaction(txn)
|
Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise.
|
def info_count(i: int, n: int, *rest: Token, **kwargs: Any) -> None:
num_digits = len(str(n))
counter_format = "(%{}d/%d)".format(num_digits)
counter_str = counter_format % (i + 1, n)
info(green, "*", reset, counter_str, reset, *rest, **kwargs)
|
Display a counter before the rest of the message.
``rest`` and ``kwargs`` are passed to :func:`info`
Current index should start at 0 and end at ``n-1``, like in ``enumerate()``
:param i: current index
:param n: total number of items
|
def GetMemUsedMB(self):
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemUsedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value
|
Retrieves the estimated amount of physical host memory currently
consumed for this virtual machine's physical memory.
|
def png(self):
use_plugin('freeimage')
with TemporaryFilePath(suffix='.png') as tmp:
safe_range_im = 255 * normalise(self)
imsave(tmp.fpath, safe_range_im.astype(np.uint8))
with open(tmp.fpath, 'rb') as fh:
return fh.read()
|
Return png string of image.
|
def _parse_canonical_int32(doc):
i_str = doc['$numberInt']
if len(doc) != 1:
raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,))
if not isinstance(i_str, string_type):
raise TypeError('$numberInt must be string: %s' % (doc,))
return int(i_str)
|
Decode a JSON int32 to python int.
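For context, this is the MongoDB Extended JSON shape being decoded; a doctest-style illustration (assuming the function above is in scope):
>>> _parse_canonical_int32({'$numberInt': '42'})
42
>>> _parse_canonical_int32({'$numberInt': 42})
Traceback (most recent call last):
    ...
TypeError: $numberInt must be string: {'$numberInt': 42}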
|
def loadedfields(self):
if self._loadedfields is None:
for field in self._meta.scalarfields:
yield field
else:
fields = self._meta.dfields
processed = set()
for name in self._loadedfields:
if name in processed:
continue
if name in fields:
processed.add(name)
yield fields[name]
else:
name = name.split(JSPLITTER)[0]
if name in fields and name not in processed:
field = fields[name]
if field.type == 'json object':
processed.add(name)
yield field
|
Generator of fields loaded from database
|
def is_inside_any(dir_list, fname):
for dirname in dir_list:
if is_inside(dirname, fname):
return True
return False
|
True if fname is inside any of given dirs.
|
def shutdown(self, vm_names=None, reboot=False):
self.virt_env.shutdown(vm_names, reboot)
|
Shutdown this prefix
Args:
vm_names(list of str): List of the vms to shutdown
reboot(bool): If true, reboot the requested vms
Returns:
None
|
def altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta0):
x = v / v0
if isuncertainties([v, v0, gamma0, gamma_inf, beta, theta0]):
theta = theta0 * np.power(x, -1. * gamma_inf) *\
unp.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
else:
theta = theta0 * np.power(x, -1. * gamma_inf) *\
np.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
return theta
|
Calculate the Debye temperature for the Altshuler equation.
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param gamma_inf: Gruneisen parameter at infinite pressure
:param beta: volume dependence of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
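Written out as a formula (read directly from the code above, with x = V/V0):
\theta(V) = \theta_0 \, x^{-\gamma_\infty} \exp\!\left[ \frac{\gamma_0 - \gamma_\infty}{\beta} \left( 1 - x^{\beta} \right) \right]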
|
def utterances_from_eaf(eaf_path: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
if not eaf_path.is_file():
raise FileNotFoundError("Cannot find {}".format(eaf_path))
eaf = Eaf(eaf_path)
utterances = []
for tier_name in sorted(list(eaf.tiers)):
for tier_prefix in tier_prefixes:
if tier_name.startswith(tier_prefix):
utterances.extend(utterances_from_tier(eaf, tier_name))
break
return utterances
|
Extracts utterances in tiers that start with tier_prefixes found in the ELAN .eaf XML file
at eaf_path.
For example, if xv@Mark is a tier in the eaf file, and
tier_prefixes = ["xv"], then utterances from that tier will be gathered.
|
def parse_connection_option(
header: str, pos: int, header_name: str
) -> Tuple[ConnectionOption, int]:
item, pos = parse_token(header, pos, header_name)
return cast(ConnectionOption, item), pos
|
Parse a Connection option from ``header`` at the given position.
Return the protocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
|
def multi_substitution(*substitutions):
substitutions = itertools.starmap(substitution, substitutions)
substitutions = reversed(tuple(substitutions))
return compose(*substitutions)
|
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
|
def _get_run_info_dict(self, run_id):
run_info_path = os.path.join(self._settings.info_dir, run_id, 'info')
if os.path.exists(run_info_path):
return RunInfo(run_info_path).get_as_dict()
else:
return None
|
Get the RunInfo for a run, as a dict.
|
def data_ma(self):
mask = (self._segment_img[self.slices] != self.label)
return np.ma.masked_array(self._segment_img[self.slices], mask=mask)
|
A 2D `~numpy.ma.MaskedArray` cutout image of the segment using
the minimal bounding box.
The mask is `True` for pixels outside of the source segment
(i.e. neighboring segments within the rectangular cutout image
are masked).
|
def srp1(*args, **kargs):
ans, _ = srp(*args, **kargs)
if len(ans) > 0:
return ans[0][1]
else:
return None
|
Send and receive packets at layer 2 and return only the first answer
|
def indirectStarter(url, latestSearch):
@classmethod
def _starter(cls):
data = cls.getPage(url)
return cls.fetchUrl(url, data, latestSearch)
return _starter
|
Get start URL by indirection.
|
def save(self):
if self.path.is_collection:
self.session.post_json(self.href,
{self.type: dict(self.data)},
cls=ResourceEncoder)
else:
self.session.put_json(self.href,
{self.type: dict(self.data)},
cls=ResourceEncoder)
return self.fetch(exclude_children=True, exclude_back_refs=True)
|
Save the resource to the API server
If the resource doesn't have a uuid the resource will be created.
If uuid is present the resource is updated.
:rtype: Resource
|