code | docstring
---|---|
def _process_counter_example(self, mma, w_string):
""""
Process a counterexample in the Rivest-Schapire way.
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
None
"""
diff = len(w_string)
same = 0
membership_answer = self._membership_query(w_string)
while True:
i = (same + diff) // 2  # integer midpoint; plain / would give a float index on Python 3
access_string = self._run_in_hypothesis(mma, w_string, i)
if membership_answer != self._membership_query(access_string + w_string[i:]):
diff = i
else:
same = i
if diff - same == 1:
break
exp = w_string[diff:]
self.observation_table.em_vector.append(exp)
for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
self._fill_table_entry(row, exp)
return 0 | Process a counterexample in the Rivest-Schapire way.
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
None |
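A minimal, self-contained sketch of the kind of binary search this method performs, with a hypothetical predicate standing in for the membership queries; `same` stays at an index whose answer matches the original query and `diff` at one that differs:

def find_breakpoint(predicate, n):
    # Binary-search the first index in [1, n] where the predicate flips,
    # assuming a single flip point between predicate(0) and predicate(n).
    same, diff = 0, n
    while diff - same > 1:
        mid = (same + diff) // 2
        if predicate(mid) != predicate(0):
            diff = mid
        else:
            same = mid
    return diff

print(find_breakpoint(lambda i: i >= 7, 10))  # -> 7, the toy flip point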
def _set_options_headers(self, methods):
""" Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are valid for the
requested URI
"""
request = self.request
response = request.response
response.headers['Allow'] = ', '.join(sorted(methods))
if 'Access-Control-Request-Method' in request.headers:
response.headers['Access-Control-Allow-Methods'] = \
', '.join(sorted(methods))
if 'Access-Control-Request-Headers' in request.headers:
response.headers['Access-Control-Allow-Headers'] = \
'origin, x-requested-with, content-type'
return response | Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are valid for the
requested URI |
def observed_data_to_xarray(self):
"""Convert observed_data to xarray."""
data = self.observed_data
if not isinstance(data, dict):
raise TypeError("DictConverter.observed_data is not a dictionary")
if self.dims is None:
dims = {}
else:
dims = self.dims
observed_data = dict()
for key, vals in data.items():
vals = np.atleast_1d(vals)
val_dims = dims.get(key)
val_dims, coords = generate_dims_coords(
vals.shape, key, dims=val_dims, coords=self.coords
)
observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=None)) | Convert observed_data to xarray. |
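A minimal sketch of the same dict-to-Dataset conversion without the ArviZ helper functions; the `y_dim_0` dimension name mirrors the auto-generated naming convention and is an assumption here:

import numpy as np
import xarray as xr

data = {"y": np.array([1.0, 2.0, 3.0])}
observed = {
    key: xr.DataArray(np.atleast_1d(vals), dims=["{}_dim_0".format(key)])
    for key, vals in data.items()
}
ds = xr.Dataset(data_vars=observed)
print(ds["y"].dims)  # ('y_dim_0',)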
def locale(self) -> tornado.locale.Locale:
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
loc = self.get_user_locale()
if loc is not None:
self._locale = loc
else:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale | The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged: 4.1
Added a property setter. |
def _power_mismatch_dc(self, buses, generators, B, Pbusinj, base_mva):
""" Returns the power mismatch constraint (B*Va + Pg = Pd).
"""
nb, ng = len(buses), len(generators)
# Negative bus-generator incidence matrix.
gen_bus = array([g.bus._i for g in generators])
neg_Cg = csr_matrix((-ones(ng), (gen_bus, range(ng))), (nb, ng))
Amis = hstack([B, neg_Cg], format="csr")
Pd = array([bus.p_demand for bus in buses])
Gs = array([bus.g_shunt for bus in buses])
bmis = -(Pd - Gs) / base_mva - Pbusinj
return LinearConstraint("Pmis", Amis, bmis, bmis, ["Va", "Pg"]) | Returns the power mismatch constraint (B*Va + Pg = Pd). |
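A small sketch of how the negative bus-generator incidence matrix above is assembled with scipy, assuming a toy case of three buses with generators at buses 0 and 2:

import numpy as np
from scipy.sparse import csr_matrix

nb, ng = 3, 2                      # buses, generators (toy sizes)
gen_bus = np.array([0, 2])         # generator g sits at bus gen_bus[g]
neg_Cg = csr_matrix((-np.ones(ng), (gen_bus, np.arange(ng))), shape=(nb, ng))
print(neg_Cg.toarray())
# [[-1.  0.]
#  [ 0.  0.]
#  [ 0. -1.]]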
def parse_params(self,
n_samples=None,
dx_min=-0.1,
dx_max=0.1,
n_dxs=2,
dy_min=-0.1,
dy_max=0.1,
n_dys=2,
angle_min=-30,
angle_max=30,
n_angles=6,
black_border_size=0,
**kwargs):
"""
Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param n_samples: (optional) The number of transformations sampled to
construct the attack. Set it to None to run
full grid attack.
:param dx_min: (optional float) Minimum translation ratio along x-axis.
:param dx_max: (optional float) Maximum translation ratio along x-axis.
:param n_dxs: (optional int) Number of discretized translation ratios
along x-axis.
:param dy_min: (optional float) Minimum translation ratio along y-axis.
:param dy_max: (optional float) Maximum translation ratio along y-axis.
:param n_dys: (optional int) Number of discretized translation ratios
along y-axis.
:param angle_min: (optional float) Largest counter-clockwise rotation
angle.
:param angle_max: (optional float) Largest clockwise rotation angle.
:param n_angles: (optional int) Number of discretized angles.
:param black_border_size: (optional int) size of the black border in pixels.
"""
self.n_samples = n_samples
self.dx_min = dx_min
self.dx_max = dx_max
self.n_dxs = n_dxs
self.dy_min = dy_min
self.dy_max = dy_max
self.n_dys = n_dys
self.angle_min = angle_min
self.angle_max = angle_max
self.n_angles = n_angles
self.black_border_size = black_border_size
if self.dx_min < -1 or self.dy_min < -1 or \
self.dx_max > 1 or self.dy_max > 1:
raise ValueError("The value of translation must be bounded "
"within [-1, 1]")
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True | Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param n_samples: (optional) The number of transformations sampled to
construct the attack. Set it to None to run
full grid attack.
:param dx_min: (optional float) Minimum translation ratio along x-axis.
:param dx_max: (optional float) Maximum translation ratio along x-axis.
:param n_dxs: (optional int) Number of discretized translation ratios
along x-axis.
:param dy_min: (optional float) Minimum translation ratio along y-axis.
:param dy_max: (optional float) Maximum translation ratio along y-axis.
:param n_dys: (optional int) Number of discretized translation ratios
along y-axis.
:param angle_min: (optional float) Largest counter-clockwise rotation
angle.
:param angle_max: (optional float) Largest clockwise rotation angle.
:param n_angles: (optional int) Number of discretized angles.
:param black_border_size: (optional int) size of the black border in pixels. |
def p_text(self, text):
'''text : TEXT PARBREAK
| TEXT
| PARBREAK'''
item = text[1]
text[0] = item if item[0] != "\n" else u""
if len(text) > 2:
text[0] += "\n" | text : TEXT PARBREAK
| TEXT
| PARBREAK |
def participant_ids(self):
""":class:`~hangups.user.UserID` of users involved (:class:`list`)."""
return [user.UserID(chat_id=id_.chat_id, gaia_id=id_.gaia_id)
for id_ in self._event.membership_change.participant_ids] | :class:`~hangups.user.UserID` of users involved (:class:`list`). |
def get_log_entry_mdata():
"""Return default mdata map for LogEntry"""
return {
'priority': {
'element_label': {
'text': 'priority',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.type.Type object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_type_values': ['NoneType%3ANONE%40dlkit.mit.edu'],
'syntax': 'TYPE',
'type_set': [],
},
'timestamp': {
'element_label': {
'text': 'timestamp',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'enter a valid datetime object.',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_date_time_values': [MIN_DATETIME],
'syntax': 'DATETIME',
'date_time_set': []
},
'agent': {
'element_label': {
'text': 'agent',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
} | Return default mdata map for LogEntry |
def center (self):
"""center() -> (x, y)
Returns the center (of mass) point of this Polygon.
See http://en.wikipedia.org/wiki/Polygon
Examples:
>>> p = Polygon()
>>> p.vertices = [ Point(3, 8), Point(6, 4), Point(0, 3) ]
>>> p.center()
Point(2.89285714286, 4.82142857143)
"""
Cx = 0.0
Cy = 0.0
denom = 6.0 * self.area()
for segment in self.segments():
x = (segment.p.x + segment.q.x)
y = (segment.p.y + segment.q.y)
xy = (segment.p.x * segment.q.y) - (segment.q.x * segment.p.y)
Cx += (x * xy)
Cy += (y * xy)
Cx /= denom
Cy /= denom
return Point(Cx, Cy) | center() -> (x, y)
Returns the center (of mass) point of this Polygon.
See http://en.wikipedia.org/wiki/Polygon
Examples:
>>> p = Polygon()
>>> p.vertices = [ Point(3, 8), Point(6, 4), Point(0, 3) ]
>>> p.center()
Point(2.89285714286, 4.82142857143) |
def _get_managed_files(self):
'''
Build an in-memory data structure of all managed files.
'''
if self.grains_core.os_data().get('os_family') == 'Debian':
return self.__get_managed_files_dpkg()
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
return self.__get_managed_files_rpm()
return list(), list(), list() | Build an in-memory data structure of all managed files. |
def consume_texture_coordinates(self):
"""Consume all consecutive texture coordinates"""
# The first iteration processes the current/first vt statement.
# The loop continues until there are no more vt-statements or StopIteration is raised by generator
while True:
yield (
float(self.values[1]),
float(self.values[2]),
)
try:
self.next_line()
except StopIteration:
break
if not self.values:
break
if self.values[0] != "vt":
break | Consume all consecutive texture coordinates |
def update_last_wm_layers(self, service_id, num_layers=10):
"""
Update and index the last added and deleted layers (num_layers) in WorldMap service.
"""
from hypermap.aggregator.models import Service
LOGGER.debug(
'Updating the index for the last %s added and %s deleted layers in WorldMap service'
% (num_layers, num_layers)
)
service = Service.objects.get(id=service_id)
# TODO raise error if service type is not WM type
if service.type == 'Hypermap:WorldMapLegacy':
from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm
if service.type == 'Hypermap:WorldMap':
from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm
update_layers_wm(service, num_layers)
# Remove in search engine last num_layers that were deleted
LOGGER.debug('Removing the index for the last %s deleted layers' % num_layers)
layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
for layer in layer_to_unindex:
if not settings.REGISTRY_SKIP_CELERY:
unindex_layer(layer.id, use_cache=True)
else:
unindex_layer(layer.id)
# Add/Update in search engine last num_layers that were added
LOGGER.debug('Adding/Updating the index for the last %s added layers' % num_layers)
layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
for layer in layer_to_index:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True)
else:
index_layer(layer.id) | Update and index the last added and deleted layers (num_layers) in WorldMap service. |
def rmon_alarm_entry_alarm_rising_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
alarm_entry = ET.SubElement(rmon, "alarm-entry")
alarm_index_key = ET.SubElement(alarm_entry, "alarm-index")
alarm_index_key.text = kwargs.pop('alarm_index')
alarm_rising_threshold = ET.SubElement(alarm_entry, "alarm-rising-threshold")
alarm_rising_threshold.text = kwargs.pop('alarm_rising_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def stringClade(taxrefs, name, at):
'''Return a Newick string from a list of TaxRefs'''
string = []
for ref in taxrefs:
# distance is the difference between the taxonomic level of the ref
# and the current level of the tree growth
d = float(at-ref.level)
# ensure no spaces in ident, Newick tree cannot have spaces
ident = re.sub(r"\s", "_", ref.ident)
string.append('{0}:{1}'.format(ident, d))
# join into single string with a name for the clade
string = ','.join(string)
string = '({0}){1}'.format(string, name)
return string | Return a Newick string from a list of TaxRefs |
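A hedged usage sketch, assuming each TaxRef exposes the `ident` and `level` attributes the loop above relies on:

from collections import namedtuple

TaxRef = namedtuple("TaxRef", ["ident", "level"])  # hypothetical stand-in
refs = [TaxRef("Homo sapiens", 1), TaxRef("Pan troglodytes", 1)]
# With the tree grown to level 3, each tip gets distance 3 - 1 = 2.0 and
# spaces in identifiers become underscores:
# stringClade(refs, "Hominini", 3)
# -> '(Homo_sapiens:2.0,Pan_troglodytes:2.0)Hominini'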
def recovery(self, using=None, **kwargs):
"""
The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged.
"""
return self._get_connection(using).indices.recovery(index=self._name, **kwargs) | The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged. |
def getObjectByPid(self, pid):
"""
Args:
pid : str
Returns:
str : URIRef of the entry identified by ``pid``."""
self._check_initialized()
opid = rdflib.term.Literal(pid)
res = [o for o in self.subjects(predicate=DCTERMS.identifier, object=opid)]
return res[0] | Args:
pid : str
Returns:
str : URIRef of the entry identified by ``pid``. |
def vm_snapshot_create(vm_name, kwargs=None, call=None):
'''
Creates a new virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to create the snapshot.
snapshot_name
The name of the snapshot to be created.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_create my-vm snapshot_name=my-new-snapshot
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_snapshot_create action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
snapshot_name = kwargs.get('snapshot_name', None)
if snapshot_name is None:
raise SaltCloudSystemExit(
'The vm_snapshot_create function requires a \'snapshot_name\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': vm_name}))
response = server.one.vm.snapshotcreate(auth, vm_id, snapshot_name)
data = {
'action': 'vm.snapshotcreate',
'snapshot_created': response[0],
'snapshot_id': response[1],
'error_code': response[2],
}
return data | Creates a new virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to create the snapshot.
snapshot_name
The name of the snapshot to be created.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_create my-vm snapshot_name=my-new-snapshot |
def port(self, value=None):
"""
Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance
"""
if value is not None:
return URL._mutate(self, port=value)
return self._tuple.port | Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance |
def check_is_spam(content, content_object, request,
backends=None):
"""
Return True if the content is a spam, else False.
"""
if backends is None:
backends = SPAM_CHECKER_BACKENDS
for backend_path in backends:
spam_checker = get_spam_checker(backend_path)
if spam_checker is not None:
is_spam = spam_checker(content, content_object, request)
if is_spam:
return True
return False | Return True if the content is a spam, else False. |
def module_ids(self, rev=False):
"""Gets a list of module ids guaranteed to be sorted by run_order, ignoring conn modules
(run order < 0).
"""
shutit_global.shutit_global_object.yield_to_draw()
ids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)
if rev:
return list(reversed(ids))
return ids | Gets a list of module ids guaranteed to be sorted by run_order, ignoring conn modules
(run order < 0). |
def create_environment(component_config):
"""
Create a modified environment.
Arguments
component_config - The configuration for a component.
"""
ret = os.environ.copy()
for env in component_config.get_list("dp.env_list"):
real_env = env.upper()
value = os.environ.get(real_env)
value = _prepend_env(component_config, env, value)
value = _append_env(component_config, env, value)
_apply_change(ret, real_env, value, component_config)
return ret | Create a modified environment.
Arguments
component_config - The configuration for a component. |
def run(self, endpoint: str, loop: AbstractEventLoop = None):
"""
Run server main task.
:param endpoint: Socket endpoint to listen to, e.g. "tcp://*:1234"
:param loop: Event loop to run server in (alternatively just use run_async method)
"""
if not loop:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.run_async(endpoint))
except KeyboardInterrupt:
self._shutdown() | Run server main task.
:param endpoint: Socket endpoint to listen to, e.g. "tcp://*:1234"
:param loop: Event loop to run server in (alternatively just use run_async method) |
def sorted_enums(self) -> List[Tuple[str, int]]:
"""Return list of enum items sorted by value."""
return sorted(self.enum.items(), key=lambda x: x[1]) | Return list of enum items sorted by value. |
def attribute_path(self, attribute, missing=None, visitor=None):
""" Generates a list of values of the `attribute` of all ancestors of
this node (as well as the node itself). If a value is ``None``, then
the optional value of `missing` is used (by default ``None``).
By default, the ``getattr(node, attribute, None) or missing``
mechanism is used to obtain the value of the attribute for each
node. This can be overridden by supplying a custom `visitor`
function, which expects as arguments the node and the attribute, and
should return an appropriate value for the required attribute.
:param attribute: the name of the attribute.
:param missing: optional value to use when attribute value is None.
:param visitor: optional function responsible for obtaining the
attribute value from a node.
:return: a list of values of the required `attribute` of the
ancestor path of this node.
"""
_parameters = {"node": self, "attribute": attribute}
if missing is not None:
_parameters["missing"] = missing
if visitor is not None:
_parameters["visitor"] = visitor
return self.__class__.objects.attribute_path(**_parameters) | Generates a list of values of the `attribute` of all ancestors of
this node (as well as the node itself). If a value is ``None``, then
the optional value of `missing` is used (by default ``None``).
By default, the ``getattr(node, attribute, None) or missing``
mechanism is used to obtain the value of the attribute for each
node. This can be overridden by supplying a custom `visitor`
function, which expects as arguments the node and the attribute, and
should return an appropriate value for the required attribute.
:param attribute: the name of the attribute.
:param missing: optional value to use when attribute value is None.
:param visitor: optional function responsible for obtaining the
attribute value from a node.
:return: a list of values of the required `attribute` of the
ancestor path of this node. |
def create_comment(self, access_token, video_id, content,
reply_id=None, captcha_key=None, captcha_text=None):
"""doc: http://open.youku.com/docs/doc?id=41
"""
url = 'https://openapi.youku.com/v2/comments/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id,
'content': content,
'reply_id': reply_id,
'captcha_key': captcha_key,
'captcha_text': captcha_text
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json()['id'] | doc: http://open.youku.com/docs/doc?id=41 |
def elcm_profile_set(irmc_info, input_data):
"""send an eLCM request to set param values
To apply param values, a new session is spawned with status 'running'.
When values are applied or error, the session ends.
:param irmc_info: node info
:param input_data: param values to apply, eg.
{
'Server':
{
'SystemConfig':
{
'BiosConfig':
{
'@Processing': 'execute',
-- config data --
}
}
}
}
:returns: dict object of session info if succeed
{
'Session':
{
'Id': id
'Status': 'activated'
...
}
}
:raises: SCCIClientError if SCCI failed
"""
# Prepare the data to apply
if isinstance(input_data, dict):
data = jsonutils.dumps(input_data)
else:
data = input_data
# Send POST request to the server
# NOTE: This task may take time, so set a timeout
_irmc_info = dict(irmc_info)
_irmc_info['irmc_client_timeout'] = PROFILE_SET_TIMEOUT
content_type = 'application/x-www-form-urlencoded'
if input_data['Server'].get('HWConfigurationIrmc'):
content_type = 'application/json'
resp = elcm_request(_irmc_info,
method='POST',
path=URL_PATH_PROFILE_MGMT + 'set',
headers={'Content-type': content_type},
data=data)
if resp.status_code == 202:
return _parse_elcm_response_body_as_json(resp)
else:
raise scci.SCCIClientError(('Failed to apply param values with '
'error code %(error)s' %
{'error': resp.status_code})) | send an eLCM request to set param values
To apply param values, a new session is spawned with status 'running'.
When values are applied or error, the session ends.
:param irmc_info: node info
:param input_data: param values to apply, eg.
{
'Server':
{
'SystemConfig':
{
'BiosConfig':
{
'@Processing': 'execute',
-- config data --
}
}
}
}
:returns: dict object of session info if succeed
{
'Session':
{
'Id': id
'Status': 'activated'
...
}
}
:raises: SCCIClientError if SCCI failed |
def open_buffer(self, location=None, show_in_current_window=False):
"""
Open/create a file, load it, and show it in a new buffer.
"""
eb = self._get_or_create_editor_buffer(location)
if show_in_current_window:
self.show_editor_buffer(eb) | Open/create a file, load it, and show it in a new buffer. |
def to_api_repr(self):
"""Construct the API resource representation of this access entry
Returns:
Dict[str, object]: Access entry represented as an API resource
"""
resource = {self.entity_type: self.entity_id}
if self.role is not None:
resource["role"] = self.role
return resource | Construct the API resource representation of this access entry
Returns:
Dict[str, object]: Access entry represented as an API resource |
def copy_and_run(config, src_dir):
'''
Local-only operation of the executor.
Intended for validation script developers,
and the test suite.
Please note that this function only works correctly
if the validator has one of the following names:
- validator.py
- validator.zip
Returns True when a job was prepared and executed.
Returns False when no job could be prepared.
'''
job = fake_fetch_job(config, src_dir)
if job:
job._run_validate()
return True
else:
return False | Local-only operation of the executor.
Intended for validation script developers,
and the test suite.
Please note that this function only works correctly
if the validator has one of the following names:
- validator.py
- validator.zip
Returns True when a job was prepared and executed.
Returns False when no job could be prepared. |
def maxlike(self,nseeds=50):
"""Returns the best-fit parameters, choosing the best of multiple starting guesses
:param nseeds: (optional)
Number of starting guesses, uniformly distributed throughout
allowed ranges. Default=50.
:return:
list of best-fit parameters: ``[m,age,feh,[distance,A_V]]``.
Note that distance and A_V values will be meaningless unless
magnitudes are present in ``self.properties``.
"""
m0,age0,feh0 = self.ic.random_points(nseeds)
d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
AV0 = rand.uniform(0,self.maxAV,size=nseeds)
costs = np.zeros(nseeds)
if self.fit_for_distance:
pfits = np.zeros((nseeds,5))
else:
pfits = np.zeros((nseeds,3))
def fn(p): #fmin is a function *minimizer*
return -1*self.lnpost(p)
for i,m,age,feh,d,AV in zip(range(nseeds),
m0,age0,feh0,d0,AV0):
if self.fit_for_distance:
pfit = scipy.optimize.fmin(fn,[m,age,feh,d,AV],disp=False)
else:
pfit = scipy.optimize.fmin(fn,[m,age,feh],disp=False)
pfits[i,:] = pfit
costs[i] = self.lnpost(pfit)
return pfits[np.argmax(costs),:] | Returns the best-fit parameters, choosing the best of multiple starting guesses
:param nseeds: (optional)
Number of starting guesses, uniformly distributed throughout
allowed ranges. Default=50.
:return:
list of best-fit parameters: ``[m,age,feh,[distance,A_V]]``.
Note that distance and A_V values will be meaningless unless
magnitudes are present in ``self.properties``. |
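A compact sketch of the multi-start pattern used above, with a hypothetical two-parameter objective standing in for the stellar-model posterior:

import numpy as np
import scipy.optimize

def neg_logpost(p):   # fmin minimizes, so the posterior is negated
    return (p[0] - 2.0) ** 2 + (p[1] + 1.0) ** 2

seeds = np.random.uniform(-5, 5, size=(10, 2))
fits = [scipy.optimize.fmin(neg_logpost, s, disp=False) for s in seeds]
best = min(fits, key=neg_logpost)
print(best)           # close to [2., -1.]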
def get_repositories(self, digests):
"""
Build the repositories metadata
:param digests: dict, image -> digests
"""
if self.workflow.push_conf.pulp_registries:
# If pulp was used, only report pulp images
registries = self.workflow.push_conf.pulp_registries
else:
# Otherwise report all the images we pushed
registries = self.workflow.push_conf.all_registries
output_images = []
for registry in registries:
image = self.pullspec_image.copy()
image.registry = registry.uri
pullspec = image.to_str()
output_images.append(pullspec)
digest_list = digests.get(image.to_str(registry=False), ())
for digest in digest_list:
digest_pullspec = image.to_str(tag=False) + "@" + digest
output_images.append(digest_pullspec)
return output_images | Build the repositories metadata
:param digests: dict, image -> digests |
def read_handshake(self):
"""Read and process an initial handshake message from Storm."""
msg = self.read_message()
pid_dir, _conf, _context = msg["pidDir"], msg["conf"], msg["context"]
# Write a blank PID file out to the pidDir
open(join(pid_dir, str(self.pid)), "w").close()
self.send_message({"pid": self.pid})
return _conf, _context | Read and process an initial handshake message from Storm. |
def _login(self, max_tries=2):
"""Logs in to Kindle Cloud Reader.
Args:
max_tries: The maximum number of login attempts that will be made.
Raises:
BrowserError: If method called when browser not at a signin URL.
LoginError: If login unsuccessful after `max_tries` attempts.
"""
if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):
raise BrowserError(
'Current url "%s" is not a signin url ("%s")' %
(self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))
email_field_loaded = lambda br: br.find_elements_by_id('ap_email')
self._wait().until(email_field_loaded)
tries = 0
while tries < max_tries:
# Enter the username
email_elem = self.find_element_by_id('ap_email')
email_elem.clear()
email_elem.send_keys(self._uname)
# Enter the password
pword_elem = self.find_element_by_id('ap_password')
pword_elem.clear()
pword_elem.send_keys(self._pword)
def creds_entered(_):
"""Returns whether the credentials were properly entered."""
email_ok = email_elem.get_attribute('value') == self._uname
pword_ok = pword_elem.get_attribute('value') == self._pword
return email_ok and pword_ok
kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader'
try:
self._wait(5).until(creds_entered)
self.find_element_by_id('signInSubmit-input').click()
self._wait(5).until(kcr_page_loaded)
except TimeoutException:
tries += 1
else:
return
raise LoginError | Logs in to Kindle Cloud Reader.
Args:
max_tries: The maximum number of login attempts that will be made.
Raises:
BrowserError: If method called when browser not at a signin URL.
LoginError: If login unsuccessful after `max_tries` attempts. |
def sort_topologically(dag):
"""Sort the dag breath first topologically.
Only the nodes inside the dag are returned, i.e. the nodes that are also keys.
Returns:
a topological ordering of the DAG.
Raises:
an error if this is not possible (graph is not valid).
"""
dag = copy.deepcopy(dag)
sorted_nodes = []
independent_nodes = deque(get_independent_nodes(dag))
while independent_nodes:
node = independent_nodes.popleft()
sorted_nodes.append(node)
# this alters the dag so that we are sure we are visiting the nodes only once
downstream_nodes = dag[node]
while downstream_nodes:
downstream_node = downstream_nodes.pop(0)
if downstream_node not in dag:
continue
if not has_dependencies(downstream_node, dag):
independent_nodes.append(downstream_node)
if len(sorted_nodes) != len(dag.keys()):
raise ValueError('graph is not acyclic')
return sorted_nodes | Sort the dag breadth-first topologically.
Only the nodes inside the dag are returned, i.e. the nodes that are also keys.
Returns:
a topological ordering of the DAG.
Raises:
an error if this is not possible (graph is not valid). |
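A self-contained Kahn-style sketch of the same breadth-first topological sort, on a toy DAG in which every node appears as a key:

from collections import deque

def kahn_sort(dag):
    # Count incoming edges, seed the queue with independent nodes,
    # then repeatedly release nodes whose dependencies are satisfied.
    indegree = {node: 0 for node in dag}
    for downstream in dag.values():
        for node in downstream:
            if node in indegree:
                indegree[node] += 1
    queue = deque(node for node, deg in indegree.items() if deg == 0)
    ordered = []
    while queue:
        node = queue.popleft()
        ordered.append(node)
        for downstream in dag[node]:
            if downstream in indegree:
                indegree[downstream] -= 1
                if indegree[downstream] == 0:
                    queue.append(downstream)
    if len(ordered) != len(dag):
        raise ValueError('graph is not acyclic')
    return ordered

print(kahn_sort({'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}))
# -> ['a', 'b', 'c', 'd']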
def connect(self):
"""Connect to the Redis server if necessary.
:rtype: :class:`~tornado.concurrent.Future`
:raises: :class:`~tredis.exceptions.ConnectError`
:class:`~tredis.exceptions.RedisError`
"""
future = concurrent.Future()
if self.connected:
raise exceptions.ConnectError('already connected')
LOGGER.debug('%s connecting', self.name)
self.io_loop.add_future(
self._client.connect(self.host, self.port),
lambda f: self._on_connected(f, future))
return future | Connect to the Redis server if necessary.
:rtype: :class:`~tornado.concurrent.Future`
:raises: :class:`~tredis.exceptions.ConnectError`
:class:`~tredis.exceptions.RedisError` |
def _read_credential_file(self, cfg):
"""
Implements the default (keystone) behavior.
"""
self.username = cfg.get("keystone", "username")
self.password = cfg.get("keystone", "password", raw=True)
self.tenant_id = cfg.get("keystone", "tenant_id") | Implements the default (keystone) behavior. |
def get_readwrite_instance(cls, working_dir, restore=False, restore_block_height=None):
"""
Get a read/write instance to the db, without the singleton check.
Used for low-level operations like db restore.
Not used in the steady state behavior of the system.
"""
log.warning("!!! Getting raw read/write DB instance !!!")
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block())
rc = db.db_setup()
if not rc:
if restore:
# restore from backup instead of bailing out
log.debug("Restoring from unclean shutdown")
rc = db.db_restore(block_number=restore_block_height)
if rc:
return db
else:
log.error("Failed to restore from unclean shutdown")
db.close()
raise Exception("Failed to set up db")
return db | Get a read/write instance to the db, without the singleton check.
Used for low-level operations like db restore.
Not used in the steady state behavior of the system. |
def purge_old_files(date_time, directory_path, custom_prefix="backup"):
""" Takes a datetime object and a directory path, runs through files in the
directory and deletes those tagged with a date from before the provided
datetime.
If your backups have a custom_prefix that is not the default ("backup"),
provide it with the "custom_prefix" kwarg. """
for file_name in listdir(directory_path):
try:
file_date_time = get_backup_file_time_tag(file_name, custom_prefix=custom_prefix)
except ValueError as e:
if "does not match format" in e.message:
print("WARNING. file(s) in %s do not match naming convention."
% (directory_path))
continue
raise e
if file_date_time < date_time:
remove(directory_path + file_name) | Takes a datetime object and a directory path, runs through files in the
directory and deletes those tagged with a date from before the provided
datetime.
If your backups have a custom_prefix that is not the default ("backup"),
provide it with the "custom_prefix" kwarg. |
def index(config): # pragma: no cover
"""Display group info in raw format."""
client = Client()
client.prepare_connection()
group_api = API(client)
print(group_api.index()) | Display group info in raw format. |
def afterqc_general_stats_table(self):
""" Take the parsed stats from the Afterqc report and add it to the
General Statistics table at the top of the report """
headers = OrderedDict()
headers['pct_good_bases'] = {
'title': '% Good Bases',
'description': 'Percent Good Bases',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'BuGn',
}
headers['good_reads'] = {
'title': '{} Good Reads'.format(config.read_count_prefix),
'description': 'Good Reads ({})'.format(config.read_count_desc),
'min': 0,
'modify': lambda x: x * config.read_count_multiplier,
'scale': 'GnBu',
'shared_key': 'read_count'
}
headers['total_reads'] = {
'title': '{} Total Reads'.format(config.read_count_prefix),
'description': 'Total Reads ({})'.format(config.read_count_desc),
'min': 0,
'modify': lambda x: x * config.read_count_multiplier,
'scale': 'Blues',
'shared_key': 'read_count'
}
headers['readlen'] = {
'title': 'Read Length',
'description': 'Read Length',
'min': 0,
'suffix': ' bp',
'format': '{:,.0f}',
'scale': 'YlGn'
}
self.general_stats_addcols(self.afterqc_data, headers) | Take the parsed stats from the Afterqc report and add it to the
General Statistics table at the top of the report |
def _set_buttons(self, chat, bot):
"""
Helper methods to set the buttons given the input sender and chat.
"""
if isinstance(self.reply_markup, (
types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
self._buttons = [[
MessageButton(self._client, button, chat, bot, self.id)
for button in row.buttons
] for row in self.reply_markup.rows]
self._buttons_flat = [x for row in self._buttons for x in row] | Helper methods to set the buttons given the input sender and chat. |
def get_place_tags(index_page, domain): #: TODO geoip to docstring
"""
Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wish to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects.
"""
ip_address = get_ip_address(domain)
dom = dhtmlparser.parseString(index_page)
place_tags = [
get_html_geo_place_tags(dom),
get_whois_tags(ip_address),
# [_get_geo_place_tag(ip_address)], # TODO: implement geoip
]
return sum(place_tags, []) | Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wish to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects. |
def create_resource(output_model, rtype, unique, links, existing_ids=None, id_helper=None):
'''
General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created
'''
if isinstance(id_helper, str):
idg = idgen(id_helper)
elif isinstance(id_helper, GeneratorType):
idg = id_helper
elif id_helper is None:
idg = default_idgen(None)
else:
#FIXME: G11N
raise ValueError('id_helper must be string (URL), callable or None')
ctx = context(None, None, output_model, base=None, idgen=idg, existing_ids=existing_ids, extras=None)
rid = I(materialize_entity(ctx, rtype, unique=unique))
if existing_ids is not None:
if rid in existing_ids:
return (False, rid)
existing_ids.add(rid)
output_model.add(rid, VTYPE_REL, rtype)
for r, t in links:
output_model.add(rid, r, t)
return (True, rid) | General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created |
def dec2dms(dec):
"""
ADW: This should really be replaced by astropy
"""
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
dec = float(dec)
sign = np.copysign(1.0,dec)
fdeg = np.abs(dec)
deg = int(fdeg)
fminute = (fdeg - deg)*MINUTE
minute = int(fminute)
second = (fminute - minute)*MINUTE
deg = int(deg * sign)
return (deg, minute, second) | ADW: This should really be replaced by astropy |
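A short usage sketch; the helper keeps the sign only on the degrees component, and the astropy alternative hinted at in the docstring is an assumption here, not part of the source:

print(dec2dms(-23.5))    # (-23, 30, 0.0) — sign carried only on degrees
print(dec2dms(10.125))   # (10, 7, 30.0)
# Rough astropy equivalent (hedged):
#   from astropy.coordinates import Angle
#   Angle(-23.5, unit='deg').dms   # carries the sign on every component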
def load_params_from_file(self, fname: str):
"""
Loads and sets model parameters from file.
:param fname: Path to load parameters from.
"""
utils.check_condition(os.path.exists(fname), "No model parameter file found under %s. "
"This is either not a model directory or the first training "
"checkpoint has not happened yet." % fname)
self.params, self.aux_params = utils.load_params(fname)
utils.check_condition(all(name.startswith(self.prefix) for name in self.params.keys()),
"Not all parameter names start with model prefix '%s'" % self.prefix)
utils.check_condition(all(name.startswith(self.prefix) for name in self.aux_params.keys()),
"Not all auxiliary parameter names start with model prefix '%s'" % self.prefix)
logger.info('Loaded params from "%s"', fname) | Loads and sets model parameters from file.
:param fname: Path to load parameters from. |
def __del_running_bp(self, tid, bp):
"Auxiliary method."
self.__runningBP[tid].remove(bp)
if not self.__runningBP[tid]:
del self.__runningBP[tid] | Auxiliary method. |
def to_det_oid(self, det_id_or_det_oid):
"""Convert det OID or ID to det OID"""
try:
int(det_id_or_det_oid)
except ValueError:
return det_id_or_det_oid
else:
return self.get_det_oid(det_id_or_det_oid) | Convert det OID or ID to det OID |
def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None):
"""
Installs all custom SQL.
"""
#from burlap.db import load_db_set
stop_on_error = int(stop_on_error)
site = site or ALL
name = database
r = self.local_renderer
paths = glob.glob(r.format(r.env.install_sql_path_template))
apps = [_ for _ in (apps or '').split(',') if _.strip()]
if self.verbose:
print('install_sql.apps:', apps)
def cmp_paths(d0, d1):
if d0[1] and d0[1] in d1[2]:
return -1
if d1[1] and d1[1] in d0[2]:
return +1
return cmp(d0[0], d1[0])
def get_paths(t):
"""
Returns SQL file paths in an execution order that respect dependencies.
"""
data = [] # [(path, view_name, content)]
for path in paths:
if fn and fn not in path:
continue
parts = path.split('.')
if len(parts) == 3 and parts[1] != t:
continue
if not path.lower().endswith('.sql'):
continue
content = open(path, 'r').read()
matches = re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE)
view_name = ''
if matches:
view_name = matches[0]
print('Found view %s.' % view_name)
data.append((path, view_name, content))
for d in sorted(data, cmp=cmp_paths):
yield d[0]
def run_paths(paths, cmd_template, max_retries=3):
r = self.local_renderer
paths = list(paths)
error_counts = defaultdict(int) # {path:count}
terminal = set()
if self.verbose:
print('Checking %i paths.' % len(paths))
while paths:
path = paths.pop(0)
if self.verbose:
print('path:', path)
app_name = re.findall(r'/([^/]+)/sql/', path)[0]
if apps and app_name not in apps:
self.vprint('skipping because app_name %s not in apps' % app_name)
continue
with self.settings(warn_only=True):
if self.is_local:
r.env.sql_path = path
else:
r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1]
r.put(local_path=path, remote_path=r.env.sql_path)
ret = r.run_or_local(cmd_template)
if ret and ret.return_code:
if stop_on_error:
raise Exception('Unable to execute file %s' % path)
error_counts[path] += 1
if error_counts[path] < max_retries:
paths.append(path)
else:
terminal.add(path)
if terminal:
print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
for path in sorted(list(terminal)):
print(path, file=sys.stderr)
print(file=sys.stderr)
if self.verbose:
print('install_sql.db_engine:', r.env.db_engine)
for _site, site_data in self.iter_sites(site=site, no_secure=True):
self.set_db(name=name, site=_site)
if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine:
paths = list(get_paths('postgresql'))
run_paths(
paths=paths,
cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}")
elif 'mysql' in r.env.db_engine:
paths = list(get_paths('mysql'))
run_paths(
paths=paths,
cmd_template="mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}")
else:
raise NotImplementedError | Installs all custom SQL. |
def updateTable(self, networkId, tableType, body, class_, verbose=None):
"""
Updates the table specified by the `tableType` and `networkId` parameters. New columns will be created if they do not exist in the target table.
Current limitations:
* Numbers are handled as Double
* List column is not supported in this version
:param networkId: SUID containing the table
:param tableType: Type of table
:param body: The data with which to update the table.
:param class_: None -- Not required, can be None
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'', method="PUT", body=body, verbose=verbose)
return response | Updates the table specified by the `tableType` and `networkId` parameters. New columns will be created if they do not exist in the target table.
Current limitations:
* Numbers are handled as Double
* List column is not supported in this version
:param networkId: SUID containing the table
:param tableType: Type of table
:param body: The data with which to update the table.
:param class_: None -- Not required, can be None
:param verbose: print more
:returns: default: successful operation |
def _check_endings(self):
"""Check begin/end of slug, raises Error if malformed."""
if self.slug.startswith("/") and self.slug.endswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/"))))
elif self.slug.startswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/"))))
elif self.slug.endswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/")))) | Check begin/end of slug, raises Error if malformed. |
def _default_verify_function(instance, answer, result_host, atol, verbose):
"""default verify function based on numpy.allclose"""
#first check if the length is the same
if len(instance.arguments) != len(answer):
raise TypeError("The length of argument list and provided results do not match.")
#for each element in the argument list, check if the types match
for i, arg in enumerate(instance.arguments):
if answer[i] is not None: #skip None elements in the answer list
if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray):
if answer[i].dtype != arg.dtype:
raise TypeError("Element " + str(i)
+ " of the expected results list is not of the same dtype as the kernel output: "
+ str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
if answer[i].size != arg.size:
raise TypeError("Element " + str(i)
+ " of the expected results list has a size different from "
+ "the kernel argument: "
+ str(answer[i].size) + " != " + str(arg.size) + ".")
elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number):
if answer[i].dtype != arg.dtype:
raise TypeError("Element " + str(i)
+ " of the expected results list is not the same as the kernel output: "
+ str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
else:
#either answer[i] and argument have different types or answer[i] is not a numpy type
if not isinstance(answer[i], numpy.ndarray) and not isinstance(answer[i], numpy.number):
raise TypeError("Element " + str(i)
+ " of expected results list is not a numpy array or numpy scalar.")
else:
raise TypeError("Element " + str(i)
+ " of expected results list and kernel arguments have different types.")
def _ravel(a):
if hasattr(a, 'ravel') and len(a.shape) > 1:
return a.ravel()
return a
def _flatten(a):
if hasattr(a, 'flatten'):
return a.flatten()
return a
correct = True
for i, arg in enumerate(instance.arguments):
expected = answer[i]
if expected is not None:
result = _ravel(result_host[i])
expected = _flatten(expected)
output_test = numpy.allclose(expected, result, atol=atol)
if not output_test and verbose:
print("Error: " + util.get_config_string(instance.params) + " detected during correctness check")
print("this error occured when checking value of the %oth kernel argument" % (i,))
print("Printing kernel output and expected result, set verbose=False to suppress this debug print")
numpy.set_printoptions(edgeitems=50)
print("Kernel output:")
print(result)
print("Expected:")
print(expected)
correct = correct and output_test
if not correct:
logging.debug('correctness check has found a correctness issue')
raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check")
return correct | default verify function based on numpy.allclose |
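The heart of the check above is `numpy.allclose` with an absolute tolerance; a tiny illustration of how `atol` decides the outcome:

import numpy as np

expected = np.array([1.0, 2.0, 3.0])
result = np.array([1.0, 2.0, 3.1])
print(np.allclose(expected, result, atol=0.2))    # True: 0.1 deviation within tolerance
print(np.allclose(expected, result, atol=0.01))   # False: 0.1 deviation too large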
def Drop(self: dict, n):
"""
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [1, 2]
}
]
"""
n = len(self) - n
if n <= 0:
yield from self.items()
else:
for i, e in enumerate(self.items()):
if i == n:
break
yield e | [
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [1, 2]
}
] |
def ToPath(self):
"""Converts a reference into a VFS file path."""
if self.path_type == PathInfo.PathType.OS:
return os.path.join("fs", "os", *self.path_components)
elif self.path_type == PathInfo.PathType.TSK:
return os.path.join("fs", "tsk", *self.path_components)
elif self.path_type == PathInfo.PathType.REGISTRY:
return os.path.join("registry", *self.path_components)
elif self.path_type == PathInfo.PathType.TEMP:
return os.path.join("temp", *self.path_components)
raise ValueError("Unsupported path type: %s" % self.path_type) | Converts a reference into a VFS file path. |
def run(self):
"""Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise.
"""
if self.stdout:
sys.stdout.write("extracted json data:\n" + json.dumps(
self.metadata, default=to_str) + "\n")
else:
extract_dist.class_metadata = self.metadata | Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise. |
def query(ra, dec, rad=0.1, query=None):
"""Query the CADC TAP service to determine the list of images for the
NewHorizons Search. Things to determine:
a- Images to have the reference subtracted from.
b- Image to use as the 'REFERENCE' image.
c- Images to be used for input into the reference image
Logic: Given a particular Image/CCD find all the CCDs of the same field that
overlap that CCD but are taken more than 7 days later or earlier than
that image.
"""
if query is None:
query=( """ SELECT """
""" "II/246/out".raj2000 as ra, "II/246/out".dej2000 as dec, "II/246/out".jmag as jmag """
""" FROM "II/246/out" """
""" WHERE """
""" CONTAINS(POINT('ICRS', raj2000, dej2000), CIRCLE('ICRS', {}, {}, {})) = 1 """.format(ra,dec,rad) )
tapURL = "http://TAPVizieR.u-strasbg.fr/TAPVizieR/tap/sync"
## Some default parameters for TAP service queries.
tapParams={'REQUEST': 'doQuery',
'LANG': 'ADQL',
'FORMAT': 'votable',
'QUERY': query}
response = requests.get(tapURL, params=tapParams)
data = StringIO(response.text)
data.seek(0)
T = votable.parse_single_table(data).to_table()
return T | Query the CADC TAP service to determine the list of images for the
NewHorizons Search. Things to determine:
a- Images to have the reference subtracted from.
b- Image to use as the 'REFERENCE' image.
c- Images to be used for input into the reference image
Logic: Given a particular Image/CCD find all the CCDs of the same field that
overlap that CCD but are taken more than 7 days later or earlier than
that image. |
def delete_guest(userid):
""" Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
"""
# Check if the guest exists.
guest_list_info = client.send_request('guest_list')
# the string 'userid' needs to be unicode (u'userid') under a py2 interpreter.
userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
if userid_1 not in guest_list_info['output']:
RuntimeError("Userid %s does not exist!" % userid)
# Delete the guest.
guest_delete_info = client.send_request('guest_delete', userid)
if guest_delete_info['overallRC']:
print("\nFailed to delete guest %s!" % userid)
else:
print("\nSucceeded to delete guest %s!" % userid) | Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 |
def add_key_filter(self, *args):
"""
Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce`
"""
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in a query.')
self._key_filters.append(args)
return self | Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce` |
def GroupsUsersPost(self, parameters, group_id):
"""
Add users to a group in CommonSense.
@param parameters (dictionary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful.
"""
if self.__SenseApiCall__('/groups/{group_id}/users.json'.format(group_id = group_id), 'POST', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | Add users to a group in CommonSense.
@param parameters (dictionary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful. |
def _run_incremental_transforms(self, si, transforms):
'''
Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None.
'''
## operate each transform on this one StreamItem
for transform in transforms:
try:
stream_id = si.stream_id
si_new = transform(si, context=self.context)
if si_new is None:
logger.warn('transform %r deleted %s abs_url=%r',
transform, stream_id, si and si.abs_url)
return None
si = si_new
except TransformGivingUp:
## do nothing
logger.info('transform %r giving up on %r',
transform, si.stream_id)
except Exception as exc:
logger.critical(
'transform %r failed on %r from i_str=%r abs_url=%r',
transform, si and si.stream_id, self.context.get('i_str'),
si and si.abs_url, exc_info=True)
assert si is not None
## expect to always have a stream_time
if not si.stream_time:
raise InvalidStreamItem('empty stream_time: %s' % si)
if si.stream_id is None:
raise InvalidStreamItem('empty stream_id: %r' % si)
## put the StreamItem into the output
if type(si) != streamcorpus.StreamItem_v0_3_0:
raise InvalidStreamItem('incorrect stream item object %r' %
type(si))
self.t_chunk.add(si)
return si | Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None. |
def hour(self, value=None):
"""Corresponds to IDD Field `hour`
Args:
value (int): value for IDD Field `hour`
value >= 1
value <= 24
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError('value {} need to be of type int '
'for field `hour`'.format(value))
if value < 1:
raise ValueError('value need to be greater or equal 1 '
'for field `hour`')
if value > 24:
raise ValueError('value need to be smaller 24 '
'for field `hour`')
self._hour = value | Corresponds to IDD Field `hour`
Args:
value (int): value for IDD Field `hour`
value >= 1
value <= 24
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
def write_config_value_to_file(key, value, config_path=None):
"""Write key/value pair to config file.
"""
if config_path is None:
config_path = DEFAULT_CONFIG_PATH
# Get existing config.
config = _get_config_dict_from_file(config_path)
# Add/update the key/value pair.
config[key] = value
# Create parent directories if they are missing.
mkdir_parents(os.path.dirname(config_path))
# Write the content
with open(config_path, "w") as fh:
json.dump(config, fh, sort_keys=True, indent=2)
# Set 600 permissions on the config file.
os.chmod(config_path, 33216)
return get_config_value_from_file(key, config_path) | Write key/value pair to config file. |
def _client_builder(self):
"""Build Elasticsearch client."""
client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
client_config.setdefault(
'hosts', self.app.config.get('SEARCH_ELASTIC_HOSTS'))
client_config.setdefault('connection_class', RequestsHttpConnection)
return Elasticsearch(**client_config) | Build Elasticsearch client. |
def get_bool_value(self, section, option, default=True):
"""Get the bool value of an option, if it exists."""
try:
return self.parser.getboolean(section, option)
except NoOptionError:
return bool(default) | Get the bool value of an option, if it exists. |
def begin(self, **options):
'''Begin a new :class:`Transaction`. If this :class:`Session`
is already in a :ref:`transactional state <transactional-state>`,
an error will occur. It returns the :attr:`transaction` attribute.
This method is mostly used within a ``with`` statement block::
with session.begin() as t:
t.add(...)
...
which is equivalent to::
t = session.begin()
t.add(...)
...
session.commit()
``options`` parameters are passed to the :class:`Transaction` constructor.
'''
if self.transaction is not None:
raise InvalidTransaction("A transaction is already begun.")
else:
self.transaction = Transaction(self, **options)
return self.transaction | Begin a new :class:`Transaction`. If this :class:`Session`
is already in a :ref:`transactional state <transactional-state>`,
an error will occur. It returns the :attr:`transaction` attribute.
This method is mostly used within a ``with`` statement block::
with session.begin() as t:
t.add(...)
...
which is equivalent to::
t = session.begin()
t.add(...)
...
session.commit()
``options`` parameters are passed to the :class:`Transaction` constructor. |
def send_notification(self, method, *args):
"""Send a JSON-RPC notification.
The notification *method* is sent with positional arguments *args*.
"""
message = self._version.create_request(method, args, notification=True)
self.send_message(message) | Send a JSON-RPC notification.
The notification *method* is sent with positional arguments *args*. |
def _ReadFloatingPointDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.FloatingPointDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
is_member=is_member, supported_size_values=(4, 8)) | Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition. |
def longitude(self, value=0.0):
"""Corresponds to IDD Field `longitude`
- is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)
Args:
value (float): value for IDD Field `longitude`
Unit: deg
Default value: 0.0
value >= -180.0
value <= 180.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
                raise ValueError('value {} needs to be of type float '
                                 'for field `longitude`'.format(value))
if value < -180.0:
                raise ValueError('value needs to be greater than or equal to -180.0 '
                                 'for field `longitude`')
if value > 180.0:
                raise ValueError('value needs to be smaller than 180.0 '
                                 'for field `longitude`')
self._longitude = value | Corresponds to IDD Field `longitude`
- is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)
Args:
value (float): value for IDD Field `longitude`
Unit: deg
Default value: 0.0
value >= -180.0
value <= 180.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
def parser_factory(fake_args=None):
"""Return a proper contextual OptionParser"""
parser = ArgumentParser(description='aomi')
    subparsers = parser.add_subparsers(dest='operation',
                                       help='Specify the data '
                                            'or extraction operation')
extract_file_args(subparsers)
environment_args(subparsers)
aws_env_args(subparsers)
seed_args(subparsers)
render_args(subparsers)
diff_args(subparsers)
freeze_args(subparsers)
thaw_args(subparsers)
template_args(subparsers)
password_args(subparsers)
token_args(subparsers)
help_args(subparsers)
export_args(subparsers)
if fake_args is None:
return parser, parser.parse_args()
return parser, parser.parse_args(fake_args) | Return a proper contextual OptionParser |
def read_mda(attribute):
"""Read HDFEOS metadata and return a dict with all the key/value pairs."""
lines = attribute.split('\n')
mda = {}
current_dict = mda
path = []
prev_line = None
for line in lines:
if not line:
continue
if line == 'END':
break
if prev_line:
line = prev_line + line
key, val = line.split('=')
key = key.strip()
val = val.strip()
try:
val = eval(val)
except NameError:
pass
except SyntaxError:
prev_line = line
continue
prev_line = None
if key in ['GROUP', 'OBJECT']:
new_dict = {}
path.append(val)
current_dict[val] = new_dict
current_dict = new_dict
elif key in ['END_GROUP', 'END_OBJECT']:
if val != path[-1]:
raise SyntaxError
path = path[:-1]
current_dict = mda
for item in path:
current_dict = current_dict[item]
elif key in ['CLASS', 'NUM_VAL']:
pass
else:
current_dict[key] = val
return mda | Read HDFEOS metadata and return a dict with all the key/value pairs. |
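A minimal illustration of the parser above on a hand-written (hypothetical) HDFEOS attribute string; GROUP/OBJECT pairs become nested dicts and CLASS/NUM_VAL keys are dropped:

sample = ('GROUP=INVENTORYMETADATA\n'
          '  OBJECT=LOCALGRANULEID\n'
          '    NUM_VAL=1\n'
          '    VALUE="MOD021KM.A2017001.hdf"\n'
          '  END_OBJECT=LOCALGRANULEID\n'
          'END_GROUP=INVENTORYMETADATA\n'
          'END')
print(read_mda(sample))
# {'INVENTORYMETADATA': {'LOCALGRANULEID': {'VALUE': 'MOD021KM.A2017001.hdf'}}}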
def get_all_subdomains(offset=0, count=100, proxy=None, hostport=None):
"""
Get all subdomains within the given range.
Return the list of names on success
Return {'error': ...} on failure
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport)
offset = int(offset)
count = int(count)
page_schema = {
'type': 'object',
'properties': {
'names': {
'type': 'array',
'items': {
'type': 'string',
'uniqueItems': True
},
},
},
'required': [
'names',
],
}
schema = json_response_schema(page_schema)
try:
resp = proxy.get_all_subdomains(offset, count)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp
for name in resp['names']:
if not is_subdomain(str(name)):
raise ValidationError('Not a valid subdomain: {}'.format(str(name)))
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
return resp['names'] | Get all subdomains within the given range.
Return the list of names on success
Return {'error': ...} on failure |
def check_subdomain_transition(cls, existing_subrec, new_subrec):
"""
Given an existing subdomain record and a (newly-discovered) new subdomain record,
determine if we can use the new subdomain record (i.e. is its signature valid? is it in the right sequence?)
Return True if so
Return False if not
"""
if existing_subrec.get_fqn() != new_subrec.get_fqn():
return False
if existing_subrec.n + 1 != new_subrec.n:
return False
if not new_subrec.verify_signature(existing_subrec.address):
log.debug("Invalid signature from {}".format(existing_subrec.address))
return False
if virtualchain.address_reencode(existing_subrec.address) != virtualchain.address_reencode(new_subrec.address):
if new_subrec.independent:
log.debug("Transfer is independent of domain: {}".format(new_subrec))
return False
return True | Given an existing subdomain record and a (newly-discovered) new subdomain record,
determine if we can use the new subdomain record (i.e. is its signature valid? is it in the right sequence?)
Return True if so
Return False if not |
def _strict_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False):
"""
Matches struct2 onto struct1 (which should contain all sites in
struct2).
Args:
struct1, struct2 (Structure): structures to be matched
fu (int): size of supercell to create
s1_supercell (bool): whether to create the supercell of
struct1 (vs struct2)
use_rms (bool): whether to minimize the rms of the matching
break_on_match (bool): whether to stop search at first
valid match
"""
if fu < 1:
raise ValueError("fu cannot be less than 1")
mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2,
fu, s1_supercell)
if mask.shape[0] > mask.shape[1]:
raise ValueError('after supercell creation, struct1 must '
'have more sites than struct2')
# check that a valid mapping exists
if (not self._subset) and mask.shape[1] != mask.shape[0]:
return None
if LinearAssignment(mask).min_cost > 0:
return None
best_match = None
# loop over all lattices
for s1fc, s2fc, avg_l, sc_m in \
self._get_supercells(struct1, struct2, fu, s1_supercell):
# compute fractional tolerance
normalization = (len(s1fc) / avg_l.volume) ** (1/3)
inv_abc = np.array(avg_l.reciprocal_lattice.abc)
frac_tol = inv_abc * self.stol / (np.pi * normalization)
# loop over all translations
for s1i in s1_t_inds:
t = s1fc[s1i] - s2fc[s2_t_ind]
t_s2fc = s2fc + t
if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):
inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)
lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)
dist, t_adj, mapping = self._cart_dists(
s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)
if use_rms:
val = np.linalg.norm(dist) / len(dist) ** 0.5
else:
val = max(dist)
if best_match is None or val < best_match[0]:
total_t = t + t_adj
total_t -= np.round(total_t)
best_match = val, dist, sc_m, total_t, mapping
if (break_on_match or val < 1e-5) and val < self.stol:
return best_match
if best_match and best_match[0] < self.stol:
return best_match | Matches struct2 onto struct1 (which should contain all sites in
struct2).
Args:
struct1, struct2 (Structure): structures to be matched
fu (int): size of supercell to create
s1_supercell (bool): whether to create the supercell of
struct1 (vs struct2)
use_rms (bool): whether to minimize the rms of the matching
break_on_match (bool): whether to stop search at first
valid match |
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
interface='publicURL')
if keystone.session:
return nova_client.Client(NOVA_CLIENT_VERSION,
session=keystone.session,
auth_url=ep)
elif novaclient.__version__[0] >= "7":
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, password=password,
project_name=tenant, auth_url=ep)
else:
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep) | Authenticates a regular user with nova-api. |
def _setup(self):
"Resets the state and prepares for running the example."
self.example.error = None
self.example.traceback = ''
# inject function contexts from parent functions
c = Context(parent=self.context)
#for parent in reversed(self.example.parents):
# c._update_properties(locals_from_function(parent))
self.context = c
if self.is_root_runner:
run.before_all.execute(self.context)
self.example.before(self.context) | Resets the state and prepares for running the example. |
def chartbeat_top(parser, token):
"""
Top Chartbeat template tag.
Render the top Javascript code for Chartbeat.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return ChartbeatTopNode() | Top Chartbeat template tag.
Render the top Javascript code for Chartbeat. |
def _is_executable_file(path):
"""Checks that path is an executable regular file, or a symlink towards one.
    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# follow symlinks,
fpath = os.path.realpath(path)
if not os.path.isfile(fpath):
# non-files (directories, fifo, etc.)
return False
return os.access(fpath, os.X_OK) | Checks that path is an executable regular file, or a symlink towards one.
    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
def render(directory, opt):
"""Render any provided template. This includes the Secretfile,
Vault policies, and inline AWS roles"""
if not os.path.exists(directory) and not os.path.isdir(directory):
os.mkdir(directory)
a_secretfile = render_secretfile(opt)
s_path = "%s/Secretfile" % directory
LOG.debug("writing Secretfile to %s", s_path)
open(s_path, 'w').write(a_secretfile)
ctx = Context.load(yaml.safe_load(a_secretfile), opt)
for resource in ctx.resources():
if not resource.present:
continue
if issubclass(type(resource), Policy):
if not os.path.isdir("%s/policy" % directory):
os.mkdir("%s/policy" % directory)
filename = "%s/policy/%s" % (directory, resource.path)
open(filename, 'w').write(resource.obj())
LOG.debug("writing %s to %s", resource, filename)
elif issubclass(type(resource), AWSRole):
if not os.path.isdir("%s/aws" % directory):
os.mkdir("%s/aws" % directory)
if 'policy' in resource.obj():
filename = "%s/aws/%s" % (directory,
os.path.basename(resource.path))
r_obj = resource.obj()
if 'policy' in r_obj:
LOG.debug("writing %s to %s", resource, filename)
open(filename, 'w').write(r_obj['policy']) | Render any provided template. This includes the Secretfile,
Vault policies, and inline AWS roles |
def xform_key(self, key):
'''we transform cache keys by taking their sha1 hash so that
we don't need to worry about cache keys containing invalid
characters'''
newkey = hashlib.sha1(key.encode('utf-8'))
return newkey.hexdigest() | we transform cache keys by taking their sha1 hash so that
we don't need to worry about cache keys containing invalid
characters |
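Standalone, the transformation is just a SHA-1 hex digest of the UTF-8 key, so arbitrary characters collapse to a fixed-length hex string (sketch):

import hashlib

raw_key = 'user:42/profile picture?.png'   # characters some cache backends reject
safe_key = hashlib.sha1(raw_key.encode('utf-8')).hexdigest()
print(len(safe_key), safe_key)             # 40 hex characters, always backend-safe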
def is_classmethod(meth):
"""Detects if the given callable is a classmethod.
"""
if inspect.ismethoddescriptor(meth):
return isinstance(meth, classmethod)
if not inspect.ismethod(meth):
return False
if not inspect.isclass(meth.__self__):
return False
if not hasattr(meth.__self__, meth.__name__):
return False
return meth == getattr(meth.__self__, meth.__name__) | Detects if the given callable is a classmethod. |
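A quick sanity check of the detection logic above (sketch; the three calls exercise the class-bound, plain-function and instance-bound cases):

class Widget:
    @classmethod
    def build(cls):
        return cls()

    @staticmethod
    def helper():
        return None

    def render(self):
        return 'ok'

print(is_classmethod(Widget.build))       # True: bound method whose __self__ is the class
print(is_classmethod(Widget.helper))      # False: plain function, not a method
print(is_classmethod(Widget().render))    # False: bound to an instance, not a class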
def get(self, key, timeout=None):
"""Given a key, returns an element from the redis table"""
key = self.pre_identifier + key
# Check to see if we have this key
unpickled_entry = self.client.get(key)
if not unpickled_entry:
# No hit, return nothing
return None
entry = pickle.loads(unpickled_entry)
# Use provided timeout in arguments if provided
# otherwise use the one provided during init.
if timeout is None:
timeout = self.timeout
# Make sure entry is not expired
if self._is_expired(entry, timeout):
# entry expired, delete and return nothing
self.delete_entry(key)
return None
# entry found and not expired, return it
return entry[1] | Given a key, returns an element from the redis table |
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
"""
Finds the list of bonded atoms.
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms are considered bonded if (radius of atom 1) + (radius of atom 2) + (tolerance) < (distance between atoms 1 and 2). Default value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values from JMol are used as default
standardize: works with conventional standard structures if True. It is recommended to keep this as True.
Returns:
        connected_list: A numpy array of shape (number of bonded pairs, 2); each row is of the form [atomi, atomj].
atomi and atomj are the indices of the atoms in the input structure.
If any image of atomj is bonded to atomi with periodic boundary conditions, [atomi, atomj] is included in the list.
If atomi is bonded to multiple images of atomj, it is only counted once.
"""
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
species = list(map(str, struct.species))
#in case of charged species
for i,item in enumerate(species):
if not item in ldict.keys():
species[i]=str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_list = []
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
add_ij = False
for move_cell in itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]):
if not add_ij:
frac_diff = fc[j] + move_cell - fc[i]
distance_ij = np.dot(latmat.T, frac_diff)
if np.linalg.norm(distance_ij) < max_bond_length:
add_ij = True
if add_ij:
connected_list.append([i, j])
return np.array(connected_list) | Finds the list of bonded atoms.
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms are considered bonded if (radius of atom 1) + (radius of atom 2) + (tolerance) < (distance between atoms 1 and 2). Default value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values from JMol are used as default
standardize: works with conventional standard structures if True. It is recommended to keep this as True.
Returns:
        connected_list: A numpy array of shape (number of bonded pairs, 2); each row is of the form [atomi, atomj].
atomi and atomj are the indices of the atoms in the input structure.
If any image of atomj is bonded to atomi with periodic boundary conditions, [atomi, atomj] is included in the list.
If atomi is bonded to multiple images of atomj, it is only counted once. |
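A hedged usage sketch with pymatgen (import paths differ between pymatgen releases; rock-salt NaCl is just a convenient test structure):

from pymatgen.core import Lattice, Structure   # older releases: from pymatgen import Lattice, Structure

nacl = Structure.from_spacegroup(
    'Fm-3m', Lattice.cubic(5.64), ['Na', 'Cl'], [[0, 0, 0], [0.5, 0.5, 0.5]])
pairs = find_connected_atoms(nacl)
print(pairs.shape)   # (n_bonded_pairs, 2); every Na index paired with a neighbouring Cl index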
def cmd(send, msg, args):
"""Reports the difference between now and some specified time.
Syntax: {command} <time>
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('date', nargs='*', action=arguments.DateParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if not cmdargs.date:
send("Time until when?")
return
delta = dateutil.relativedelta.relativedelta(cmdargs.date, datetime.datetime.now())
diff = "%s is " % cmdargs.date.strftime("%x")
if delta.years:
diff += "%d years " % (delta.years)
if delta.months:
diff += "%d months " % (delta.months)
if delta.days:
diff += "%d days " % (delta.days)
if delta.hours:
diff += "%d hours " % (delta.hours)
if delta.minutes:
diff += "%d minutes " % (delta.minutes)
if delta.seconds:
diff += "%d seconds " % (delta.seconds)
diff += "away"
send(diff) | Reports the difference between now and some specified time.
Syntax: {command} <time> |
def _normalize(value):
"""
Normalize handle values.
"""
if hasattr(value, 'value'):
value = value.value
if value is not None:
value = long(value)
return value | Normalize handle values. |
def frictional_resistance_coef(length, speed, **kwargs):
"""
Flat plate frictional resistance of the ship according to ITTC formula.
ref: https://ittc.info/media/2021/75-02-02-02.pdf
:param length: metres length of the vehicle
:param speed: m/s speed of the vehicle
:param kwargs: optional could take in temperature to take account change of water property
:return: Frictional resistance coefficient of the vehicle
"""
Cf = 0.075 / (np.log10(reynolds_number(length, speed, **kwargs)) - 2) ** 2
return Cf | Flat plate frictional resistance of the ship according to ITTC formula.
ref: https://ittc.info/media/2021/75-02-02-02.pdf
:param length: metres length of the vehicle
:param speed: m/s speed of the vehicle
:param kwargs: optional could take in temperature to take account change of water property
:return: Frictional resistance coefficient of the vehicle |
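A worked example of the ITTC-1957 correlation line behind this helper, computed directly so it does not depend on ``reynolds_number``'s exact signature; the seawater viscosity used here is an assumption (about 1.19e-6 m²/s at 15 °C):

import numpy as np

length, speed = 100.0, 10.0            # m, m/s
nu = 1.19e-6                           # assumed kinematic viscosity of seawater, m^2/s
Re = speed * length / nu               # Reynolds number, about 8.4e8
Cf = 0.075 / (np.log10(Re) - 2) ** 2   # ITTC-1957 line
print(Cf)                              # about 1.6e-3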
def check(self, instance):
"""
Process both the istio_mesh instance and process_mixer instance associated with this instance
"""
# Get the config for the istio_mesh instance
istio_mesh_endpoint = instance.get('istio_mesh_endpoint')
istio_mesh_config = self.config_map[istio_mesh_endpoint]
# Process istio_mesh
self.process(istio_mesh_config)
# Get the config for the process_mixer instance
process_mixer_endpoint = instance.get('mixer_endpoint')
process_mixer_config = self.config_map[process_mixer_endpoint]
# Process process_mixer
self.process(process_mixer_config) | Process both the istio_mesh instance and process_mixer instance associated with this instance |
def is_success(self):
''' check all sessions to see if they have completed successfully '''
for _session in self._sessions.values():
if not _session.is_success():
return False
return True | check all sessions to see if they have completed successfully |
def store_vdp_vsi(self, port_uuid, mgrid, typeid, typeid_ver,
vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
new_network, reply, oui_id, oui_data, vsw_cb_fn,
vsw_cb_data, reason):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param new_network: Is this the first vNIC of this network
:param reply: Response from the switch
:param oui_id: OUI Type
:param oui_data: OUI Data
:param vsw_cb_fn: Callback function from the app.
:param vsw_cb_data: Callback data for the app.
:param reason: Failure Reason
"""
if port_uuid in self.vdp_vif_map:
LOG.debug("Not Storing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': mac, 'uuid': vsiid})
if new_network:
vdp_vlan = reply
else:
vdp_vlan = vlan
vdp_dict = {'vdp_vlan': vdp_vlan,
'mgrid': mgrid,
'typeid': typeid,
'typeid_ver': typeid_ver,
'vsiid_frmt': vsiid_frmt,
'vsiid': vsiid,
'filter_frmt': filter_frmt,
'mac': mac,
'gid': gid,
'vsw_cb_fn': vsw_cb_fn,
'vsw_cb_data': vsw_cb_data,
'fail_reason': reason,
'callback_count': 0}
self.vdp_vif_map[port_uuid] = vdp_dict
LOG.debug("Storing VDP VSI MAC %(mac)s UUID %(uuid)s VDP VLAN "
"%(vlan)s", {'mac': mac, 'uuid': vsiid, 'vlan': vdp_vlan})
if oui_id:
self.store_oui(port_uuid, oui_id, oui_data) | Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param new_network: Is this the first vNIC of this network
:param reply: Response from the switch
:param oui_id: OUI Type
:param oui_data: OUI Data
:param vsw_cb_fn: Callback function from the app.
:param vsw_cb_data: Callback data for the app.
:param reason: Failure Reason |
async def whowas(self, nickname):
"""
Return information about offline user.
        This is a blocking asynchronous method: it has to be called from a coroutine, as follows:
info = await self.whowas('Nick')
"""
# Same treatment as nicknames in whois.
if protocol.ARGUMENT_SEPARATOR.search(nickname) is not None:
result = self.eventloop.create_future()
result.set_result(None)
return result
if nickname not in self._pending['whowas']:
await self.rawmsg('WHOWAS', nickname)
self._whowas_info[nickname] = {}
# Create a future for when the WHOWAS requests succeeds.
self._pending['whowas'][nickname] = self.eventloop.create_future()
return await self._pending['whowas'][nickname] | Return information about offline user.
        This is a blocking asynchronous method: it has to be called from a coroutine, as follows:
info = await self.whowas('Nick') |
def output_paas(gandi, paas, datacenters, vhosts, output_keys, justify=11):
""" Helper to output a paas information."""
output_generic(gandi, paas, output_keys, justify)
if 'sftp_server' in output_keys:
output_line(gandi, 'sftp_server', paas['ftp_server'], justify)
if 'vhost' in output_keys:
for entry in vhosts:
output_line(gandi, 'vhost', entry, justify)
if 'dc' in output_keys:
dc_name = paas['datacenter'].get('dc_code',
paas['datacenter'].get('iso', ''))
output_line(gandi, 'datacenter', dc_name, justify)
if 'df' in paas:
df = paas['df']
total = df['free'] + df['used']
if total:
disk_used = '%.1f%%' % (df['used'] * 100 / total)
output_line(gandi, 'quota used', disk_used, justify)
if 'snapshot' in output_keys:
val = None
if paas['snapshot_profile']:
val = paas['snapshot_profile']['name']
output_line(gandi, 'snapshot', val, justify)
if 'cache' in paas:
cache = paas['cache']
total = cache['hit'] + cache['miss'] + cache['not'] + cache['pass']
if total:
output_line(gandi, 'cache', None, justify)
for key in sorted(cache):
str_value = '%.1f%%' % (cache[key] * 100 / total)
output_sub_line(gandi, key, str_value, 5) | Helper to output a paas information. |
def rot90(img):
'''
rotate one or multiple grayscale or color images 90 degrees
'''
s = img.shape
if len(s) == 3:
if s[2] in (3, 4): # color image
out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
for i in range(s[2]):
out[:, :, i] = np.rot90(img[:, :, i])
        else:  # multiple grayscale
out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
for i in range(s[0]):
out[i] = np.rot90(img[i])
elif len(s) == 2: # one grayscale
out = np.rot90(img)
elif len(s) == 4 and s[3] in (3, 4): # multiple color
out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
for i in range(s[0]): # for each img
for j in range(s[3]): # for each channel
out[i, :, :, j] = np.rot90(img[i, :, :, j])
else:
        raise NotImplementedError('unsupported image shape: {}'.format(s))
return out | rotate one or multiple grayscale or color images 90 degrees |
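Shape check of the four layouts the helper accepts (sketch):

import numpy as np

print(rot90(np.zeros((4, 6))).shape)         # single grayscale   -> (6, 4)
print(rot90(np.zeros((4, 6, 3))).shape)      # single color       -> (6, 4, 3)
print(rot90(np.zeros((5, 4, 6))).shape)      # stack of grayscale -> (5, 6, 4)
print(rot90(np.zeros((5, 4, 6, 3))).shape)   # stack of color     -> (5, 6, 4, 3)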
async def inline_query(self, bot, query, *, offset=None, geo_point=None):
"""
Makes the given inline query to the specified bot
i.e. ``@vote My New Poll`` would be as follows:
>>> client = ...
>>> client.inline_query('vote', 'My New Poll')
Args:
bot (`entity`):
The bot entity to which the inline query should be made.
query (`str`):
The query that should be made to the bot.
offset (`str`, optional):
The string offset to use for the bot.
geo_point (:tl:`GeoPoint`, optional)
The geo point location information to send to the bot
for localised results. Available under some bots.
Returns:
A list of `custom.InlineResult
<telethon.tl.custom.inlineresult.InlineResult>`.
"""
bot = await self.get_input_entity(bot)
result = await self(functions.messages.GetInlineBotResultsRequest(
bot=bot,
peer=types.InputPeerEmpty(),
query=query,
offset=offset or '',
geo_point=geo_point
))
return custom.InlineResults(self, result) | Makes the given inline query to the specified bot
i.e. ``@vote My New Poll`` would be as follows:
>>> client = ...
>>> client.inline_query('vote', 'My New Poll')
Args:
bot (`entity`):
The bot entity to which the inline query should be made.
query (`str`):
The query that should be made to the bot.
offset (`str`, optional):
The string offset to use for the bot.
geo_point (:tl:`GeoPoint`, optional)
The geo point location information to send to the bot
for localised results. Available under some bots.
Returns:
A list of `custom.InlineResult
<telethon.tl.custom.inlineresult.InlineResult>`. |
def GetInput(self):
"Build the INPUT structure for the action"
actions = 1
# if both up and down
if self.up and self.down:
actions = 2
inputs = (INPUT * actions)()
vk, scan, flags = self._get_key_info()
for inp in inputs:
inp.type = INPUT_KEYBOARD
inp._.ki.wVk = vk
inp._.ki.wScan = scan
inp._.ki.dwFlags |= flags
# if we are releasing - then let it up
if self.up:
inputs[-1]._.ki.dwFlags |= KEYEVENTF_KEYUP
return inputs | Build the INPUT structure for the action |
def _reconnect_delay(self):
""" Calculate reconnection delay. """
if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED:
if self._reconnect_attempts >= len(self.RECONNECT_DELAYS):
return self.RECONNECT_DELAYS[-1]
else:
return self.RECONNECT_DELAYS[self._reconnect_attempts]
else:
return 0 | Calculate reconnection delay. |
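For instance, with a delay ladder like the one below (illustrative values, not the library's actual defaults), repeated failed attempts walk up the ladder and then stay at the last entry:

RECONNECT_DELAYS = (0, 5, 10, 30, 120)   # assumed example values

for attempts in range(7):
    if attempts >= len(RECONNECT_DELAYS):
        delay = RECONNECT_DELAYS[-1]
    else:
        delay = RECONNECT_DELAYS[attempts]
    print(attempts, delay)   # 0->0, 1->5, 2->10, 3->30, 4->120, 5->120, 6->120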
def hash(value, arg):
"""
Returns a hex-digest of the passed in value for the hash algorithm given.
"""
arg = str(arg).lower()
if sys.version_info >= (3,0):
value = value.encode("utf-8")
    if arg not in get_available_hashes():
        raise TemplateSyntaxError("The %s hash algorithm does not exist. Supported algorithms are: %s" % (arg, get_available_hashes()))
try:
f = getattr(hashlib, arg)
hashed = f(value).hexdigest()
except Exception:
raise ValueError("The %s hash algorithm cannot produce a hex digest. Ensure that OpenSSL is properly installed." % arg)
return hashed | Returns a hex-digest of the passed in value for the hash algorithm given. |
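Under the hood the filter just resolves the algorithm name on ``hashlib``; the equivalent standalone call for, say, ``sha256`` is:

import hashlib

value = 'value passed through the template filter'
print(getattr(hashlib, 'sha256')(value.encode('utf-8')).hexdigest())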
def stem(self, text):
"""Stem each word of the Latin text."""
stemmed_text = ''
for word in text.split(' '):
if word not in self.stops:
# remove '-que' suffix
word, in_que_pass_list = self._checkremove_que(word)
if not in_que_pass_list:
# remove the simple endings from the target word
word, was_stemmed = self._matchremove_simple_endings(word)
# if word didn't match the simple endings, try verb endings
if not was_stemmed:
word = self._matchremove_verb_endings(word)
# add the stemmed word to the text
stemmed_text += word + ' '
return stemmed_text | Stem each word of the Latin text. |
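Usage is just a call on whitespace-separated Latin text; a sketch, assuming the enclosing class is instantiated as ``Stemmer()`` (hypothetical name here). Note that stop words are dropped rather than passed through:

stemmer = Stemmer()                      # hypothetical constructor for the enclosing class
print(stemmer.stem('arma virumque cano'))
# '-que' suffixes and common noun/verb endings are stripped word by word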
def query(usr, pwd, *hpo_terms):
"""
Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
yields:
parsed_term (dict): A dictionary with the parsed information
from phenomizer
"""
raw_result = query_phenomizer(usr, pwd, *hpo_terms)
for line in raw_result.text.split('\n'):
if len(line) > 1:
if not line.startswith('#'):
yield parse_result(line) | Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
yields:
parsed_term (dict): A dictionary with the parsed information
from phenomizer |
def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
Gj=None, G=None):
"""Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
Adapted version to work with multichannel sources.
Improved performance can be gained by passing Gj and G parameters initially
as all zeros. These parameters store the results from the computation of
the G matrix in _project_images and then return them for subsequent calls
    to this function. This only works when not computing permutations.
"""
nsampl = np.shape(estimated_source)[0]
nchan = np.shape(estimated_source)[1]
# are we saving the Gj and G parameters?
saveg = Gj is not None and G is not None
# decomposition
# true source image
s_true = np.hstack((np.reshape(reference_sources[j],
(nsampl, nchan),
order="F").transpose(),
np.zeros((nchan, flen - 1))))
# spatial (or filtering) distortion
if saveg:
e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen, Gj)
else:
e_spat = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen)
e_spat = e_spat - s_true
# interference
if saveg:
e_interf, G = _project_images(reference_sources,
estimated_source, flen, G)
else:
e_interf = _project_images(reference_sources,
estimated_source, flen)
e_interf = e_interf - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:, :nsampl] += estimated_source.transpose()
# return Gj and G only if they were passed in
if saveg:
return (s_true, e_spat, e_interf, e_artif, Gj, G)
else:
return (s_true, e_spat, e_interf, e_artif) | Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
Adapted version to work with multichannel sources.
Improved performance can be gained by passing Gj and G parameters initially
as all zeros. These parameters store the results from the computation of
the G matrix in _project_images and then return them for subsequent calls
    to this function. This only works when not computing permutations.
def _close_holding(self, trade):
"""
        Apply a closing trade and calculate the realized PnL from the close.
        Buy to close:
            delta_realized_pnl = sum of ((trade_price - cost_price) * quantity) of closed trades * contract_multiplier
        Sell to close:
            delta_realized_pnl = sum of ((cost_price - trade_price) * quantity) of closed trades * contract_multiplier
:param trade: rqalpha.model.trade.Trade
:return: float
"""
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
            # close yesterday's (old) holdings first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's holdings
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
            # close yesterday's (old) holdings first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's holdings
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
left_quantity = 0
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
        return delta | Apply a closing trade and calculate the realized PnL from the close.
        Buy to close:
            delta_realized_pnl = sum of ((trade_price - cost_price) * quantity) of closed trades * contract_multiplier
        Sell to close:
            delta_realized_pnl = sum of ((cost_price - trade_price) * quantity) of closed trades * contract_multiplier
:param trade: rqalpha.model.trade.Trade
:return: float |
def domain_delete(domain, logger):
"""libvirt domain undefinition.
@raise: libvirt.libvirtError.
"""
if domain is not None:
try:
if domain.isActive():
domain.destroy()
except libvirt.libvirtError:
logger.exception("Unable to destroy the domain.")
try:
domain.undefine()
except libvirt.libvirtError:
try:
domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA) # domain with snapshots
except libvirt.libvirtError:
logger.exception("Unable to undefine the domain.") | libvirt domain undefinition.
@raise: libvirt.libvirtError. |