def is_unary_operator(oper):
"""returns True, if operator is unary operator, otherwise False"""
# definition:
# member in class
# ret-type operator symbol()
# ret-type operator [++ --](int)
# globally
# ret-type operator symbol( arg )
# ret-type operator [++ --](X&, int)
symbols = ['!', '&', '~', '*', '+', '++', '-', '--']
if not isinstance(oper, calldef_members.operator_t):
return False
if oper.symbol not in symbols:
return False
if isinstance(oper, calldef_members.member_operator_t):
if len(oper.arguments) == 0:
return True
elif oper.symbol in ['++', '--'] and \
isinstance(oper.arguments[0].decl_type, cpptypes.int_t):
return True
return False
if len(oper.arguments) == 1:
return True
elif oper.symbol in ['++', '--'] \
and len(oper.arguments) == 2 \
and isinstance(oper.arguments[1].decl_type, cpptypes.int_t):
# maybe we should also check whether the first argument is a
# reference or not?
return True
return False
def Power(base: vertex_constructor_param_types, exponent: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Raises a vertex to the power of another
:param base: the base vertex
:param exponent: the exponent vertex
"""
return Double(context.jvm_view().PowerVertex, label, cast_to_double_vertex(base), cast_to_double_vertex(exponent))
def word(self, value):
"""Property of the DigitWord returning (or setting) the DigitWord as a list of integers (or
string representations) of DigitModel. The property is called during instantiation as the
property validates the value passed and ensures that all digits are valid. The values can
be passed as ANY iterable"""
self._validate_word(value=value)
_word = []
# Iterate the values passed.
for a in value:
# Check the value is an int or a string; on Python 2 the separate
# unicode type is also accepted (it was merged into str in Python 3).
if sys.version_info[0] == 2:
string_types = (str, unicode)  # noqa -- Python 2 only
else:
string_types = (str,)
if not isinstance(a, (int,) + string_types):
raise ValueError('DigitWords must be made from digits (strings or ints) '
'between 0 and 9 for decimal and 0 and 15 for hex')
# If this is Python 2.x we convert unicode to str, otherwise we
# leave the value as is.
if sys.version_info[0] == 2 and isinstance(a, unicode):
_a = str(a)
else:
_a = a
# Create the correct type of Digit based on the wordtype of the DigitWord
if self.wordtype == DigitWord.DIGIT:
_digit = Digit(_a)
elif self.wordtype == DigitWord.HEXDIGIT:
_digit = HexDigit(_a)
else:
raise TypeError('The wordtype is not valid.')
_word.append(_digit)
self._word = _word
def set(self, id, value):
"""
Write data for the given id.
:param id: the id to write to
:param value: the data to write; may be a ``dict`` object
"""
id = self.key_name(id)
self.redis.set(id, json_dumps(value))
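A hypothetical usage sketch (the ``storage`` instance and key names are assumptions; only the ``set`` call reflects the code above):

# Store a session dict; it is JSON-serialized under key_name('user42').
storage.set('user42', {'access_token': 'abc', 'expires_in': 7200})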
def _make_stream_handler_nodes(self, dsk_graph, array, iteration_order,
masked):
"""
Produce task graph entries for an array that comes from a biggus
StreamsHandler.
This is essentially every type of array that isn't already a thing on
disk/in-memory. StreamsHandler arrays include all aggregations and
elementwise operations.
"""
nodes = {}
handler = array.streams_handler(masked)
input_iteration_order = handler.input_iteration_order(iteration_order)
def input_keys_transform(input_array, keys):
if hasattr(input_array, 'streams_handler'):
handler = input_array.streams_handler(masked)
# Get the transformer of the input array, and apply it to the
# keys.
input_transformer = getattr(handler,
'output_keys', None)
if input_transformer is not None:
keys = input_transformer(keys)
return keys
sources_keys = []
sources_chunks = []
for input_array in array.sources:
# Bring together all chunks that influence the same part of this
# (resultant) array.
source_chunks_by_key = {}
sources_chunks.append(source_chunks_by_key)
source_keys = []
sources_keys.append(source_keys)
# Make nodes for the source arrays (if they don't already exist)
# before we do anything else.
input_nodes = self._make_nodes(dsk_graph, input_array,
input_iteration_order, masked)
for chunk_id, task in input_nodes.items():
chunk_keys = task[1]
t_keys = chunk_keys
t_keys = input_keys_transform(array, t_keys)
source_keys.append(t_keys)
this_key = str(t_keys)
source_chunks_by_key.setdefault(this_key,
[]).append([chunk_id, task])
sources_keys_grouped = key_grouper.group_keys(array.shape,
*sources_keys)
for slice_group, sources_keys_group in sources_keys_grouped.items():
# Each group is entirely independent and can have its own task
# without knowledge of results from items in other groups.
t_keys = tuple(slice(*slice_tuple) for slice_tuple in slice_group)
all_chunks = []
for source_keys, source_chunks_by_key in zip(sources_keys_group,
sources_chunks):
dependencies = tuple(
the_id
for keys in source_keys
for the_id, task in source_chunks_by_key[str(keys)])
# Uniquify dependencies, but keep the order.
dependencies = tuple(_unique_everseen(dependencies))
def normalize_keys(keys, shape):
result = []
for key, dim_length in zip(keys, shape):
result.append(key_grouper.normalize_slice(key,
dim_length))
return tuple(result)
# If we don't have the same chunks for all inputs then we
# should combine them before passing them on to the handler.
# TODO: Fix slice equality to deal with 0 and None etc.
if not all(t_keys == normalize_keys(keys, array.shape)
for keys in source_keys):
combined = self.collect(array[t_keys], masked, chunk=True)
new_task = (combined, ) + dependencies
new_id = ('chunk shape: {}\n\n{}'
''.format(array[t_keys].shape, uuid.uuid4()))
dsk_graph[new_id] = new_task
dependencies = (new_id, )
all_chunks.append(dependencies)
pivoted = all_chunks
sub_array = array[t_keys]
handler = sub_array.streams_handler(masked)
name = getattr(handler, 'nice_name', handler.__class__.__name__)
if hasattr(handler, 'axis'):
name += '\n(axis={})'.format(handler.axis)
# For ElementwiseStreams handlers, use the function that they wrap
# (e.g. "add").
if hasattr(handler, 'operator'):
name = handler.operator.__name__
n_sources = len(array.sources)
handler_of_chunks_fn = self.create_chunks_handler_fn(handler,
n_sources,
name)
shape = sub_array.shape
if all(key == slice(None) for key in t_keys):
subset = ''
else:
pretty_index = ', '.join(map(slice_repr, t_keys))
subset = 'target subset [{}]\n'.format(pretty_index)
# Flatten out the pivot so that dask can dereference the IDs
source_chunks = [item for sublist in pivoted for item in sublist]
task = tuple([handler_of_chunks_fn, t_keys] + source_chunks)
shape_repr = ', '.join(map(str, shape))
chunk_id = 'chunk shape: ({})\n\n{}{}'.format(shape_repr,
subset,
uuid.uuid4())
assert chunk_id not in dsk_graph
dsk_graph[chunk_id] = task
nodes[chunk_id] = task
return nodes
def upload(self, local_fn: str, remote_fn: str = '',
dont_overwrite: bool = False):
"""Uploads given file to the task. If remote_fn is not specified, dumps it
into task current directory with the same name.
Args:
local_fn: location of file locally
remote_fn: location of file on task
dont_overwrite: if True, this is a no-op when the target file exists
"""
raise NotImplementedError()
def context_teardown(func: Callable):
"""
Wrap an async generator function to execute the rest of the function at context teardown.
This function returns an async function, which, when called, starts the wrapped async
generator. The wrapped async function is run until the first ``yield`` statement
(``await async_generator.yield_()`` on Python 3.5). When the context is being torn down, the
exception that ended the context, if any, is sent to the generator.
For example::
class SomeComponent(Component):
@context_teardown
async def start(self, ctx: Context):
service = SomeService()
ctx.add_resource(service)
exception = yield
service.stop()
:param func: an async generator function
:return: an async function
"""
@wraps(func)
async def wrapper(*args, **kwargs) -> None:
async def teardown_callback(exception: Optional[Exception]):
try:
await generator.asend(exception)
except StopAsyncIteration:
pass
finally:
await generator.aclose()
try:
ctx = next(arg for arg in args[:2] if isinstance(arg, Context))
except StopIteration:
raise RuntimeError('the first positional argument to {}() has to be a Context '
'instance'.format(callable_name(func))) from None
generator = func(*args, **kwargs)
try:
await generator.asend(None)
except StopAsyncIteration:
pass
except BaseException:
await generator.aclose()
raise
else:
ctx.add_teardown_callback(teardown_callback, True)
if iscoroutinefunction(func):
func = async_generator(func)
elif not isasyncgenfunction(func):
raise TypeError('{} must be an async generator function'.format(callable_name(func)))
return wrapper
async def say(self, body, user, options):
"""
Say something to the user.
:param body: the message text to send
:param user: the recipient user
:param options: extra options passed through to the send call
:return: the result of sending the text message to all interfaces
"""
return await self.send_text_message_to_all_interfaces(
recipient=user, text=body, options=options)
def write_observation_zone(self, num, **kw):
"""
Write observation zone information for a taskpoint::
writer.write_observation_zone(
num=1,
style=1,
radius=(35, 'km'),
angle=30,
)
# -> ObsZone=1,Style=1,R1=35km,A1=30
:param num: consecutive number of a waypoint (``0``: Start)
:param style: direction (``0``: Fixed value, ``1``: Symmetrical, ``2``:
To next point, ``3``: To previous point, ``4``: To start point)
:param radius: radius 1 in meter or as ``(radius, unit)`` tuple
:param angle: angle 1 in degrees
:param radius2: radius 2 in meter or as ``(radius, unit)`` tuple
:param angle2: angle 2 in degrees
:param angle12: angle 12 in degrees
:param line: should be ``True`` if start or finish line
"""
if not self.in_task_section:
raise RuntimeError(
u'Observation zones have to be written in task section')
fields = [u'ObsZone=%d' % num]
if 'style' in kw:
fields.append(u'Style=%d' % kw['style'])
if 'radius' in kw:
fields.append(u'R1=' + self.format_distance(kw['radius']))
if 'angle' in kw:
fields.append(u'A1=' + self.format_angle(kw['angle']))
if 'radius2' in kw:
fields.append(u'R2=' + self.format_distance(kw['radius2']))
if 'angle2' in kw:
fields.append(u'A2=' + self.format_angle(kw['angle2']))
if 'angle12' in kw:
fields.append(u'A12=' + self.format_angle(kw['angle12']))
if 'line' in kw:
fields.append(u'Line=' + ('1' if kw['line'] else '0'))
self.write_fields(fields)
async def update_champs(self):
"""A method which updates ``self.rune_links``.
This is useful because runeforge.gg is frequently updating.
Raises
------
RuneConnectionError
If the request does not return with a status of 200.
"""
html = await self._get(self.URL)
self.rune_links = utils.parse_rune_links(html)
def _handle_response(self, response):
"""
Handles the response received from Scrapyd.
"""
if not response.ok:
raise ScrapydResponseError(
"Scrapyd returned a {0} error: {1}".format(
response.status_code,
response.text))
try:
json = response.json()
except ValueError:
raise ScrapydResponseError("Scrapyd returned an invalid JSON "
"response: {0}".format(response.text))
if json['status'] == 'ok':
json.pop('status')
return json
elif json['status'] == 'error':
raise ScrapydResponseError(json['message'])
def add_organism(self, common_name, directory, blatdb=None, genus=None,
species=None, public=False):
"""
Add an organism
:type common_name: str
:param common_name: Organism common name
:type directory: str
:param directory: Server-side directory
:type blatdb: str
:param blatdb: Server-side Blat directory for the organism
:type genus: str
:param genus: Genus
:type species: str
:param species: Species
:type public: bool
:param public: whether the organism should be public (defaults to False)
:rtype: dict
:return: a dictionary with information about the new organism
"""
data = {
'commonName': common_name,
'directory': directory,
'publicMode': public,
}
if blatdb is not None:
data['blatdb'] = blatdb
if genus is not None:
data['genus'] = genus
if species is not None:
data['species'] = species
response = self.post('addOrganism', data)
# Apollo returns information about EVERY organism here, so filter the
# response down to the organism we just added.
if type(response) is not list:
return response
return [x for x in response if x['commonName'] == common_name][0]
def get_best_fit_parameters_translated_grouped(self):
"""Returns the parameters as a dictionary of the 'real units' for the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters_translated]
result_dict['ir'] = [parameters['ir'] for parameters in
self.best_fit_parameters_translated]
for i in range(self.circuits):
result_dict['r' + str(i)] = [parameters['r' + str(i)] for parameters
in self.best_fit_parameters_translated]
result_dict['c' + str(i)] = [parameters['c' + str(i)] for parameters
in self.best_fit_parameters_translated]
return result_dict
def _send_command(self, cmd=""):
"""
Handle reading/writing the channel directly. It also sanitizes the received output.
Parameters
----------
cmd : str, optional
The command to send to the remote device (default : "", just send a new line)
Returns
-------
output : str
The output from the command sent
"""
self.connection.write_channel(cmd + "\n")
time.sleep(1)
output = self.connection._read_channel_timing()
output = self.connection.strip_ansi_escape_codes(output)
output = self.connection.strip_backspaces(output)
return output
def find_matches(self, content, file_to_handle):
"""Find all matches of an expression in a file
"""
# look for all match groups in the content
groups = [match.groupdict() for match in
self.match_expression.finditer(content)]
# filter out content not in the matchgroup
matches = [group['matchgroup'] for group in groups
if group.get('matchgroup')]
logger.info('Found %s matches in %s', len(matches), file_to_handle)
# We only need the unique strings found as we'll be replacing each
# of them. No need to replace the ones already replaced.
return list(set(matches))
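A minimal sketch of the contract this method relies on: the expression must define a named group called ``matchgroup`` (the ``handler`` object here is an assumption):

import re
# The expression captures the text to replace in a group named 'matchgroup'.
handler.match_expression = re.compile(r'version: (?P<matchgroup>\d+\.\d+\.\d+)')
content = 'version: 1.2.3\nversion: 1.2.3\nversion: 2.0.0'
handler.find_matches(content, 'setup.py')  # -> ['1.2.3', '2.0.0'] (order not guaranteed)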
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new fields
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
# The elif condition already guarantees def_field is missing.
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
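A short sketch of the backfill behavior (the ``project`` instance is assumed, with ``def_field == 'record_id'``):

project.backfill_fields(fields=['age', 'sex'], forms=None)
# -> ['age', 'sex', 'record_id']   (def_field appended)
project.backfill_fields(fields=None, forms=['demographics'])
# -> ['record_id']                 (forms only: just the def_field)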
def replace_seqres(self, pdb, update_atoms = True):
"""Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains"""
newpdb = PDB()
inserted_seqres = False
entries_before_seqres = set(["HEADER", "OBSLTE", "TITLE", "CAVEAT", "COMPND", "SOURCE",
"KEYWDS", "EXPDTA", "AUTHOR", "REVDAT", "SPRSDE", "JRNL",
"REMARK", "DBREF", "SEQADV"])
mutated_resids = {}
if update_atoms:
old_seqs = ChainSequences()
chainresnums = old_seqs.parse_atoms(pdb)
assert self.keys() == old_seqs.keys()
for chain in self.keys():
assert len(self[chain]) == len(old_seqs[chain])
for i in range(len(self[chain])):
if self[chain][i] != old_seqs[chain][i]:
resid = chain + chainresnums[chain][i]
mutated_resids[resid] = self[chain][i]
for line in pdb.lines:
entry = line[0:6]
if (not inserted_seqres) and entry not in entries_before_seqres:
inserted_seqres = True
newpdb.lines += self.seqres_lines()
if update_atoms and entry == "ATOM ":
resid = line[21:27]
atom = line[12:16].strip()
if resid not in mutated_resids:
newpdb.lines += [line]
else:
newpdb.lines += [line[:17] + mutated_resids[resid] + line[20:]]
elif entry != "SEQRES":
newpdb.lines += [line]
if update_atoms:
newpdb.remove_nonbackbone_atoms(mutated_resids.keys())
return newpdb
def loguniform(low, high, random_state):
'''
low: a float representing the lower bound
high: a float representing the upper bound
random_state: an instance of numpy.random.RandomState
'''
assert low > 0, 'Lower bound must be positive'
return np.exp(uniform(np.log(low), np.log(high), random_state))
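A usage sketch, assuming the module-level ``uniform`` helper draws a sample from ``random_state``:

import numpy as np
rs = np.random.RandomState(42)
# Sample a learning rate log-uniformly between 1e-5 and 1e-1.
lr = loguniform(1e-5, 1e-1, rs)
assert 1e-5 <= lr <= 1e-1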
def read(self, filename):
"""Reads the file specified by filename
This method will load the eapi.conf file specified by filename into
the instance object. It will also add the default connection localhost
if it was not defined in the eapi.conf file
Args:
filename (str): The full path to the file to load
"""
try:
SafeConfigParser.read(self, filename)
except SafeConfigParserError as exc:
# Ignore file and syslog a message on SafeConfigParser errors
msg = ("%s: parsing error in eapi conf file: %s" %
(type(exc).__name__, filename))
debug(msg)
self._add_default_connection()
for name in self.sections():
if name.startswith('connection:') and \
'host' not in dict(self.items(name)):
self.set(name, 'host', name.split(':')[1])
self.generate_tags()
def get_output(self):
"""
Retrieve the stored data in full.
This call may block if the reading thread has not yet terminated.
"""
self._closing = True
if not self.has_finished():
if self._debug:
# Main thread overtook stream reading thread.
underrun_debug_timer = timeit.default_timer()
logger.warning("NBSR underrun")
self._thread.join()
if not self.has_finished():
if self._debug:
logger.debug(
"NBSR join after %f seconds, underrun not resolved"
% (timeit.default_timer() - underrun_debug_timer)
)
raise Exception("thread did not terminate")
if self._debug:
logger.debug(
"NBSR underrun resolved after %f seconds"
% (timeit.default_timer() - underrun_debug_timer)
)
if self._closed:
raise Exception("streamreader double-closed")
self._closed = True
data = self._buffer.getvalue()
self._buffer.close()
return data
def periodic_service_rpcs(self):
"""Check if any RPC has expired and remove it from the in flight list.
This function should be called periodically to expire any RPCs that never complete.
"""
to_remove = []
now = monotonic()
for rpc_tag, rpc in self.in_flight_rpcs.items():
expiry = rpc.sent_timestamp + rpc.timeout
if now > expiry:
to_remove.append(rpc_tag)
for tag in to_remove:
del self.in_flight_rpcs[tag]
def setup_menu(self):
"""Setup context menu"""
self.copy_action = create_action(self, _('Copy'),
shortcut=keybinding('Copy'),
icon=ima.icon('editcopy'),
triggered=self.copy,
context=Qt.WidgetShortcut)
menu = QMenu(self)
add_actions(menu, [self.copy_action, ])
return menu
def transmit(self, payload, **kwargs):
"""
Send a completion status call to Degreed using the client.
Args:
payload: The learner completion data payload to send to Degreed
"""
kwargs['app_label'] = 'degreed'
kwargs['model_name'] = 'DegreedLearnerDataTransmissionAudit'
kwargs['remote_user_id'] = 'degreed_user_email'
super(DegreedLearnerTransmitter, self).transmit(payload, **kwargs)
def write_tabular(obj, filepath):
"""Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl'
"""
_, fn, ext = splitext2(filepath)
if ext == '.h5':
_write_tabular_h5(obj, filepath)
elif ext == '.pkl':
_write_tabular_pickle(obj, filepath)
else:
raise NotImplementedError
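A brief usage sketch (assuming ``splitext2`` splits off the extension as shown):

import pandas as pd
df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
write_tabular(df, 'table.h5')   # dispatches to _write_tabular_h5
write_tabular(df, 'table.pkl')  # dispatches to _write_tabular_pickle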
def include(prop):
'''Replicate property that is normally not replicated. Right now it's
meaningful for one-to-many relations only.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
#assert isinstance(prop, RelationshipProperty)
_included.add(prop)
def do(self):
"""Executes the request represented by this object. The requests library will be used for this purpose.
Returns an instance of requests.Response.
"""
data = None
if self.body is not None and self.body != b'':
data = self.body
return requests.request(self.method, str(self.url), data=data, headers=self.header)
def deserialize(cls, target_class, array):
"""
:type target_class: core.Installation|type
:type array: list
:rtype: core.Installation
"""
installation = target_class.__new__(target_class)
server_public_key_wrapped = array[cls._INDEX_SERVER_PUBLIC_KEY]
installation.__dict__ = {
cls._ATTRIBUTE_ID: converter.deserialize(
core.Id,
array[cls._INDEX_ID][cls._FIELD_ID]
),
cls._ATTRIBUTE_TOKEN: converter.deserialize(
core.SessionToken,
array[cls._INDEX_TOKEN][cls._FIELD_TOKEN]
),
cls._ATTRIBUTE_SERVER_PUBLIC_KEY: converter.deserialize(
core.PublicKeyServer,
server_public_key_wrapped[cls._FIELD_SERVER_PUBLIC_KEY]
),
}
return installation
def train_epoch(self, epoch_info: EpochInfo, interactive=True):
""" Train model on an epoch of a fixed number of batch updates """
epoch_info.on_epoch_begin()
if interactive:
iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc="Training", unit="batch")
else:
iterator = range(epoch_info.batches_per_epoch)
for batch_idx in iterator:
batch_info = BatchInfo(epoch_info, batch_idx)
batch_info.on_batch_begin()
self.train_batch(batch_info)
batch_info.on_batch_end()
epoch_info.result_accumulator.freeze_results()
epoch_info.on_epoch_end()
def clean_path_middleware(environ, start_response=None):
'''Clean url from double slashes and redirect if needed.'''
path = environ['PATH_INFO']
if path and '//' in path:
url = re.sub("/+", '/', path)
if not url.startswith('/'):
url = '/%s' % url
qs = environ['QUERY_STRING']
if qs:
url = '%s?%s' % (url, qs)
raise HttpRedirect(url)
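A quick sketch of the redirect behavior:

environ = {'PATH_INFO': '//foo//bar', 'QUERY_STRING': 'a=1'}
try:
    clean_path_middleware(environ)
except HttpRedirect:
    # Redirects to the normalized URL '/foo/bar?a=1'.
    pass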
def crl(self):
"""
Returns up to date CRL of this CA
"""
revoked_certs = self.get_revoked_certs()
crl = crypto.CRL()
now_str = timezone.now().strftime(generalized_time)
for cert in revoked_certs:
revoked = crypto.Revoked()
revoked.set_serial(bytes_compat(cert.serial_number))
revoked.set_reason(b'unspecified')
revoked.set_rev_date(bytes_compat(now_str))
crl.add_revoked(revoked)
return crl.export(self.x509, self.pkey, days=1, digest=b'sha256')
def resolve_page(self, request, context, is_staff):
"""Return the appropriate page according to the path."""
path = context['path']
lang = context['lang']
page = Page.objects.from_path(
path, lang,
exclude_drafts=(not is_staff))
if page:
return page
# If the complete path didn't work out properly, and the
# PAGE_USE_STRICT_URL setting is not used, we try to see whether it
# might be a delegation page. To do that we remove the rightmost part
# of the url and try again to find a page that matches.
if not settings.PAGE_USE_STRICT_URL:
path = remove_slug(path)
while path is not None:
page = Page.objects.from_path(
path, lang,
exclude_drafts=(not is_staff))
# find a match. Is the page delegating?
if page:
if page.delegate_to:
return page
path = remove_slug(path)
return None
def start_engines(opts, proc_mgr, proxy=None):
'''
Fire up the configured engines!
'''
utils = salt.loader.utils(opts, proxy=proxy)
if opts['__role'] == 'master':
runners = salt.loader.runner(opts, utils=utils)
else:
runners = []
funcs = salt.loader.minion_mods(opts, utils=utils, proxy=proxy)
engines = salt.loader.engines(opts, funcs, runners, utils, proxy=proxy)
engines_opt = opts.get('engines', [])
if isinstance(engines_opt, dict):
engines_opt = [{k: v} for k, v in engines_opt.items()]
# Function references are not picklable. Windows needs to pickle when
# spawning processes. On Windows, these will need to be recalculated
# in the spawned child process.
if salt.utils.platform.is_windows():
runners = None
utils = None
funcs = None
for engine in engines_opt:
if isinstance(engine, dict):
engine, engine_opts = next(iter(engine.items()))
else:
engine_opts = None
engine_name = None
if engine_opts is not None and 'engine_module' in engine_opts:
fun = '{0}.start'.format(engine_opts['engine_module'])
engine_name = engine
del engine_opts['engine_module']
else:
fun = '{0}.start'.format(engine)
if fun in engines:
start_func = engines[fun]
if engine_name:
name = '{0}.Engine({1}-{2})'.format(__name__,
start_func.__module__,
engine_name)
else:
name = '{0}.Engine({1})'.format(__name__,
start_func.__module__)
log.info('Starting Engine %s', name)
proc_mgr.add_process(
Engine,
args=(
opts,
fun,
engine_opts,
funcs,
runners,
proxy
),
name=name
)
def list_gebouwen_by_huisnummer(self, huisnummer):
'''
List all `gebouwen` for a :class:`Huisnummer`.
:param huisnummer: The :class:`Huisnummer` for which the \
`gebouwen` are wanted.
:rtype: A :class:`list` of :class:`Gebouw`
'''
try:
id = huisnummer.id
except AttributeError:
id = huisnummer
def creator():
res = crab_gateway_request(
self.client, 'ListGebouwenByHuisnummerId', id
)
try:
return [
Gebouw(
r.IdentificatorGebouw,
r.AardGebouw,
r.StatusGebouw
) for r in res.GebouwItem
]
except AttributeError:
return []
if self.caches['short'].is_configured:
key = 'ListGebouwenByHuisnummerId#%s' % (id)
gebouwen = self.caches['short'].get_or_create(key, creator)
else:
gebouwen = creator()
for r in gebouwen:
r.set_gateway(self)
return gebouwen
def cmd(binary, subcommand, *args, **kwargs):
"""
Construct a command line for a "modern UNIX" command.
Modern UNIX commands do a closely related set of things and do them well.
Examples include :code:`apt-get` or :code:`git`.
:param binary: the name of the command
:param subcommand: the subcommand used
:param args: positional arguments (put last)
:param kwargs: options
:returns: list of arguments that is suitable to be passed to :code:`subprocess.Popen`
and friends.
When specifying options, the following assumptions are made:
* Option names begin with :code:`--` and any :code:`_` is assumed to be a :code:`-`
* If the value is :code:`NO_VALUE`, this is a "naked" option.
* If the value is a string or an int, these are presented as the value of the option.
* If the value is a list, the option will be repeated multiple times.
* If the value is a dict, the option will be repeated multiple times, and
its values will be :code:`<KEY>=<VALUE>`.
"""
ret = [binary, subcommand]
for key, value in kwargs.items():
key = '--' + key.replace('_', '-')
ret.extend(_keyword_arguments(value, key))
ret.extend(args)
return ret
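A hypothetical call illustrating the keyword conventions above (the exact rendering of each option is delegated to the module's ``_keyword_arguments`` helper):

cmd('apt-get', 'install', 'nginx',
    assume_yes=NO_VALUE,               # naked option: --assume-yes
    option={'Dpkg::Progress': '1'})    # dict option: --option Dpkg::Progress=1
# -> roughly ['apt-get', 'install', '--assume-yes',
#             '--option', 'Dpkg::Progress=1', 'nginx']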
def mouse_click(self, widget, event=None):
"""Triggered when mouse click is pressed in the history tree. The method shows all scoped data for an execution
step as tooltip or fold and unfold the tree by double-click and select respective state for double clicked
element.
"""
if event.type == Gdk.EventType._2BUTTON_PRESS and event.get_button()[1] == 1:
(model, row) = self.history_tree.get_selection().get_selected()
if row is not None:
history_item_path = self.history_tree_store.get_path(row)
history_item_iter = self.history_tree_store.get_iter(history_item_path)
# logger.info(history_item.state_reference)
# TODO generalize double-click folding and unfolding -> also used in states tree of state machine
if history_item_path is not None and self.history_tree_store.iter_n_children(history_item_iter):
if self.history_tree.row_expanded(history_item_path):
self.history_tree.collapse_row(history_item_path)
else:
self.history_tree.expand_to_path(history_item_path)
sm = self.get_history_item_for_tree_iter(history_item_iter).state_reference.get_state_machine()
if sm:
if sm.state_machine_id != self.model.selected_state_machine_id:
self.model.selected_state_machine_id = sm.state_machine_id
else:
logger.info("No state machine could be found for selected item's state reference and "
"therefore no selection is performed.")
return
active_sm_m = self.model.get_selected_state_machine_model()
assert active_sm_m.state_machine is sm
state_path = self.get_history_item_for_tree_iter(history_item_iter).state_reference.get_path()
ref_state_m = active_sm_m.get_state_model_by_path(state_path)
if ref_state_m and active_sm_m:
active_sm_m.selection.set(ref_state_m)
return True
if event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 2:
x = int(event.x)
y = int(event.y)
pthinfo = self.history_tree.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
self.history_tree.grab_focus()
self.history_tree.set_cursor(path, col, 0)
self.open_selected_history_separately(None)
if event.type == Gdk.EventType.BUTTON_PRESS and event.get_button()[1] == 3:
x = int(event.x)
y = int(event.y)
time = event.time
pthinfo = self.history_tree.get_path_at_pos(x, y)
if pthinfo is not None:
path, col, cellx, celly = pthinfo
self.history_tree.grab_focus()
self.history_tree.set_cursor(path, col, 0)
popup_menu = Gtk.Menu()
model, row = self.history_tree.get_selection().get_selected()
history_item = model[row][self.HISTORY_ITEM_STORAGE_ID]
if not isinstance(history_item, ScopedDataItem) or history_item.scoped_data is None:
return
scoped_data = history_item.scoped_data
input_output_data = history_item.child_state_input_output_data
state_reference = history_item.state_reference
self.append_string_to_menu(popup_menu, "------------------------")
self.append_string_to_menu(popup_menu, "Scoped Data: ")
self.append_string_to_menu(popup_menu, "------------------------")
for key, data in scoped_data.items():
menu_item_string = " %s (%s - %s):\t%s" % (
data.name.replace("_", "__"), key, data.value_type, data.value)
self.append_string_to_menu(popup_menu, menu_item_string)
if input_output_data:
if isinstance(history_item, CallItem):
self.append_string_to_menu(popup_menu, "------------------------")
self.append_string_to_menu(popup_menu, "Input Data:")
self.append_string_to_menu(popup_menu, "------------------------")
else:
self.append_string_to_menu(popup_menu, "------------------------")
self.append_string_to_menu(popup_menu, "Output Data:")
self.append_string_to_menu(popup_menu, "------------------------")
for key, data in input_output_data.items():
menu_item_string = " %s :\t%s" % (key.replace("_", "__"), data)
self.append_string_to_menu(popup_menu, menu_item_string)
if state_reference:
if history_item.outcome:
self.append_string_to_menu(popup_menu, "------------------------")
final_outcome_menu_item_string = "Final outcome: " + str(history_item.outcome)
self.append_string_to_menu(popup_menu, final_outcome_menu_item_string)
self.append_string_to_menu(popup_menu, "------------------------")
popup_menu.show()
popup_menu.popup(None, None, None, None, event.get_button()[1], time)
return True
def info(
self,
page: 'WikipediaPage'
) -> 'WikipediaPage':
"""
https://www.mediawiki.org/w/api.php?action=help&modules=query%2Binfo
https://www.mediawiki.org/wiki/API:Info
"""
params = {
'action': 'query',
'prop': 'info',
'titles': page.title,
'inprop': '|'.join([
'protection',
'talkid',
'watched',
'watchers',
'visitingwatchers',
'notificationtimestamp',
'subjectid',
'url',
'readable',
'preload',
'displaytitle'
])
}
raw = self._query(
page,
params
)
self._common_attributes(raw['query'], page)
pages = raw['query']['pages']
for k, v in pages.items():
if k == '-1':
page._attributes['pageid'] = -1
return page
else:
return self._build_info(v, page)
return page
def hash_from_func(cls, func):
"""Return a hashlib-compatible object for the multihash `func`.
If the `func` is registered but no hashlib-compatible constructor is
available for it, `None` is returned. If the `func` is not
registered, a `KeyError` is raised.
>>> h = FuncReg.hash_from_func(Func.sha2_256)
>>> h.name
'sha256'
"""
new = cls._func_hash[func].new
return new() if new else None
def data(self, value):
"""The data property.
Args:
value (object): the property value.
"""
if value == self._defaults['data'] and 'data' in self._values:
del self._values['data']
else:
self._values['data'] = value
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value))
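A few example conversions (the exact spellings of the ``_INFINITY``/``_NAN`` constants are assumptions based on the checks above):

_ConvertFloat('1.5')        # -> 1.5
_ConvertFloat('-Infinity')  # -> float('-inf'), assuming _NEG_INFINITY == '-Infinity'
_ConvertFloat('NaN')        # -> float('nan'), assuming _NAN == 'NaN'
_ConvertFloat('nan')        # -> ParseError: use 'NaN' instead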
def _unique_constrains(cls):
"""Get all (single column and multi column) unique constraints"""
unique = [{c.name for c in u.columns} for u in cls.__table_args__
if isinstance(u, UniqueConstraint)]
unique.extend({c.name} for c in cls.__table__.columns if c.unique)
return unique
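A minimal sketch of a model this would inspect (standard SQLAlchemy declarative setup assumed):

from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    __table_args__ = (UniqueConstraint('first', 'last'),)
    id = Column(Integer, primary_key=True)
    first = Column(String)
    last = Column(String)
    email = Column(String, unique=True)

# _unique_constrains(User) -> [{'first', 'last'}, {'email'}]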
def Subclasses(cls, sort_by=None, reverse=False):
"""Get all nested Constant class and it's name pair.
:param sort_by: the attribute name used for sorting.
:param reverse: if True, return in descend order.
:returns: [(attr, value),...] pairs.
::
>>> class MyClass(Constant):
... a = 1 # non-class attribute
... b = 2 # non-class attribute
...
... class C(Constant):
... pass
...
... class D(Constant):
... pass
>>> MyClass.Subclasses()
[("C", MyClass.C), ("D", MyClass.D)]
.. versionadded:: 0.0.3
"""
l = list()
for attr, value in get_all_attributes(cls):
try:
if issubclass(value, Constant):
l.append((attr, value))
except TypeError:
# ``value`` is not a class; skip it.
pass
if sort_by is None:
sort_by = "__creation_index__"
l = list(
sorted(l, key=lambda x: getattr(x[1], sort_by), reverse=reverse))
return l
def parse_uri(config_uri):
"""
Parse the ``config_uri`` into a :class:`plaster.PlasterURL` object.
``config_uri`` can be a relative or absolute file path such as
``development.ini`` or ``/path/to/development.ini``. The file must have
an extension that can be handled by a :class:`plaster.ILoader`
registered with the system.
Alternatively, ``config_uri`` may be a :rfc:`1738`-style string.
"""
if isinstance(config_uri, PlasterURL):
return config_uri
# force absolute paths to look like a uri for more accurate parsing
# we throw away the dummy scheme later and parse it from the resolved
# path extension
isabs = os.path.isabs(config_uri)
if isabs:
config_uri = 'dummy://' + config_uri
# check if the uri is actually a url
parts = urlparse.urlparse(config_uri)
# reconstruct the path without the scheme and fragment
path = urlparse.ParseResult(
scheme='',
netloc=parts.netloc,
path=parts.path,
params='',
query='',
fragment='',
).geturl()
# strip off leading //
if path.startswith('//'):
path = path[2:]
if parts.scheme and not isabs:
scheme = parts.scheme
else:
scheme = os.path.splitext(path)[1]
if scheme.startswith('.'):
scheme = scheme[1:]
# tag uris coming from file extension as file+scheme
if scheme:
scheme = 'file+' + scheme
query = parts.query if parts.query else None
options = OrderedDict()
if query:
options.update(urlparse.parse_qsl(query))
fragment = parts.fragment if parts.fragment else None
if not scheme:
raise InvalidURI(config_uri, (
'Could not determine the loader scheme for the supplied '
'config_uri "{0}"'.format(config_uri)))
return PlasterURL(
scheme=scheme,
path=path,
options=options,
fragment=fragment,
)
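A brief usage sketch of the parsing rules described above:

url = parse_uri('development.ini?reload=true#app')
url.scheme    # 'file+ini' (derived from the file extension)
url.path      # 'development.ini'
url.options   # OrderedDict([('reload', 'true')])
url.fragment  # 'app'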
def set_plain_text(self, text, is_code):
"""Set plain text docs"""
# text is coming from utils.dochelpers.getdoc
if type(text) is dict:
name = text['name']
if name:
rst_title = ''.join(['='*len(name), '\n', name, '\n',
'='*len(name), '\n\n'])
else:
rst_title = ''
if text['argspec']:
definition = ''.join(['Definition: ', name, text['argspec'],
'\n'])
else:
definition = ''
if text['note']:
note = ''.join(['Type: ', text['note'], '\n\n----\n\n'])
else:
note = ''
full_text = ''.join([rst_title, definition, note,
text['docstring']])
else:
full_text = text
self.plain_text.set_text(full_text, is_code)
self.save_text([self.plain_text.set_text, full_text, is_code])
def organisation_logo_path(feature, parent):
"""Retrieve the full path of used specified organisation logo."""
_ = feature, parent # NOQA
organisation_logo_file = setting(
inasafe_organisation_logo_path['setting_key'])
if os.path.exists(organisation_logo_file):
return organisation_logo_file
else:
LOGGER.info(
'The custom organisation logo is not found in %s. '
'Default organisation logo will be used.',
organisation_logo_file)
return inasafe_default_settings['organisation_logo_path']
def warp(self, order):
"""对order/market的封装
[description]
Arguments:
order {[type]} -- [description]
Returns:
[type] -- [description]
"""
# Adjust the timestamp according to the order's execution model.
if order.order_model == ORDER_MODEL.MARKET:
if order.frequence is FREQUENCE.DAY:
# exact_time = str(datetime.datetime.strptime(
# str(order.datetime), '%Y-%m-%d %H-%M-%S') + datetime.timedelta(day=1))
order.date = order.datetime[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
elif order.frequence in [FREQUENCE.ONE_MIN,
FREQUENCE.FIVE_MIN,
FREQUENCE.FIFTEEN_MIN,
FREQUENCE.THIRTY_MIN,
FREQUENCE.SIXTY_MIN]:
exact_time = str(
datetime.datetime
.strptime(str(order.datetime),
'%Y-%m-%d %H:%M:%S') +
datetime.timedelta(minutes=1)
)
order.date = exact_time[0:10]
order.datetime = exact_time
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = (
float(self.market_data["high"]) +
float(self.market_data["low"])
) * 0.5
elif order.order_model == ORDER_MODEL.NEXT_OPEN:
try:
exact_time = str(
datetime.datetime
.strptime(str(order.datetime),
'%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1)
)
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
except:
order.datetime = '{} 15:00:00'.format(order.date)
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = float(self.market_data["close"])
elif order.order_model == ORDER_MODEL.CLOSE:
try:
order.datetime = self.market_data.datetime
except:
if len(str(order.datetime)) == 19:
pass
else:
order.datetime = '{} 15:00:00'.format(order.date)
self.market_data = self.get_market(order)
if self.market_data is None:
return order
order.price = float(self.market_data["close"])
elif order.order_model == ORDER_MODEL.STRICT:
# strict mode
if order.frequence is FREQUENCE.DAY:
exact_time = str(
datetime.datetime
.strptime(order.datetime,
'%Y-%m-%d %H:%M:%S') + datetime.timedelta(days=1)
)
order.date = exact_time[0:10]
order.datetime = '{} 09:30:00'.format(order.date)
elif order.frequence in [FREQUENCE.ONE_MIN,
FREQUENCE.FIVE_MIN,
FREQUENCE.FIFTEEN_MIN,
FREQUENCE.THIRTY_MIN,
FREQUENCE.SIXTY_MIN]:
exact_time = str(
datetime.datetime
.strptime(order.datetime,
'%Y-%m-%d %H:%M:%S') +
datetime.timedelta(minutes=1)
)
order.date = exact_time[0:10]
order.datetime = exact_time
self.market_data = self.get_market(order)
if self.market_data is None:
return order
if order.towards == 1:
order.price = float(self.market_data["high"])
else:
order.price = float(self.market_data["low"])
return order
def disveclayers(self, x, y, layers, aq=None):
'''Returns two arrays of size len(layers)
only used in building equations'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
qxqy = self.disvec(x, y, aq)
rv = np.sum(qxqy[:,np.newaxis,:] * aq.eigvec, 2)
return rv[:,layers]
def option_present(name, value, reload=False):
'''
Ensure the state of a particular option/setting in csf.
name
The option name in csf.conf
value
The value it should be set to.
reload
Boolean. If set to true, csf will be reloaded after.
'''
ret = {'name': 'testing mode',
'changes': {},
'result': True,
'comment': 'Option already present.'}
option = name
current_option = __salt__['csf.get_option'](option)
if current_option:
l = __salt__['csf.split_option'](current_option)
option_value = l[1]
if '"{0}"'.format(value) == option_value:
return ret
else:
result = __salt__['csf.set_option'](option, value)
ret['comment'] = 'Option modified.'
ret['changes']['Option'] = 'Changed'
else:
result = __salt__['file.append']('/etc/csf/csf.conf',
args='{0} = "{1}"'.format(option, value))
ret['comment'] = 'Option not present. Appended to csf.conf'
ret['changes']['Option'] = 'Changed.'
if reload:
if __salt__['csf.reload']():
ret['comment'] += '. Csf reloaded.'
else:
ret['comment'] += '. Csf failed to reload.'
ret['result'] = False
return ret
def reader(ltsvfile, labels=None):
"""Make LTSV Reader for reading selected labels.
:param ltsvfile: iterable of lines.
:param labels: sequence of labels. (optional)
:return: generator of record in [[label, value], ...] form.
"""
label_pattern = re.compile(r"^[0-9A-Za-z_.-]+:")
if labels is not None:
prefixes = tuple(L + ':' for L in labels
if label_pattern.match(L + ':'))
for record in ltsvfile:
record = record.rstrip('\r\n')
yield [x.split(':', 1) for x in record.split('\t')
if x.startswith(prefixes)]
return
for record in ltsvfile:
record = record.rstrip('\r\n')
yield [x.split(':', 1) for x in record.split('\t')
if label_pattern.match(x)]
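A quick usage sketch:

lines = ['host:127.0.0.1\tident:-\treq:GET / HTTP/1.1\n']
for record in reader(lines, labels=['host', 'req']):
    print(record)  # [['host', '127.0.0.1'], ['req', 'GET / HTTP/1.1']]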
def append(self, value):
"""Appends an item to the list. Similar to list.append()."""
self._values.append(self._type_checker.CheckValue(value))
if not self._message_listener.dirty:
self._message_listener.Modified()
def get_cpds(self, node=None, time_slice=0):
"""
Returns the CPDs that have been associated with the network.
Parameters
----------
node: tuple (node_name, time_slice)
The node should be in the following form (node_name, time_slice).
Here, node_name is the node that is inserted while the time_slice is
an integer value, which denotes the index of the time_slice that the
node belongs to.
time_slice: int
The time_slice should be a positive integer greater than or equal to zero.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors.discrete import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
>>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
... [0.4,0.25,0.8,0.03],
... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2])
>>> dbn.add_cpds(grade_cpd)
>>> dbn.get_cpds()
"""
# TODO: fix bugs in this
if node:
if node not in super(DynamicBayesianNetwork, self).nodes():
raise ValueError('Node not present in the model.')
else:
for cpd in self.cpds:
if cpd.variable == node:
return cpd
else:
return [cpd for cpd in self.cpds if set(list(cpd.variables)).issubset(self.get_slice_nodes(time_slice))]
def snapshot_identifier(prefix, db_identifier):
"""Return an identifier for a snapshot of a database or cluster.
"""
now = datetime.now()
return '%s-%s-%s' % (prefix, db_identifier, now.strftime('%Y-%m-%d-%H-%M'))
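For example (the timestamp depends on the call time):

# Called at 2024-01-15 10:30:
snapshot_identifier('final', 'mydb')  # -> 'final-mydb-2024-01-15-10-30'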
def refresh_access_information(self, refresh_token):
"""Return updated access information for an OAuth2 authorization grant.
:param refresh_token: the refresh token used to obtain the updated
information
:returns: A dictionary with the key/value pairs for access_token,
refresh_token and scope. The refresh_token value will be done when
the OAuth2 grant is not refreshable. The scope value will be a set
containing the scopes the tokens are valid for.
Password grants aren't refreshable, so use `get_access_information()`
again, instead.
"""
if self.config.grant_type == 'password':
data = {'grant_type': 'password',
'username': self.config.user,
'password': self.config.pswd}
else:
data = {'grant_type': 'refresh_token',
'redirect_uri': self.redirect_uri,
'refresh_token': refresh_token}
retval = self._handle_oauth_request(data)
return {'access_token': retval['access_token'],
'refresh_token': refresh_token,
'scope': set(retval['scope'].split(' '))}
def move(self, u_function):
"""
Move a mesh by using an external function which prescribes the displacement
at any point in space.
Useful for manipulating ``dolfin`` meshes.
"""
if self.mesh:
self.u = u_function
delta = [u_function(p) for p in self.mesh.coordinates()]
movedpts = self.mesh.coordinates() + delta
self.polydata(False).GetPoints().SetData(numpy_to_vtk(movedpts))
self.poly.GetPoints().Modified()
self.u_values = delta
else:
colors.printc("Warning: calling move() but actor.mesh is", self.mesh, c=3)
return self
def output(self, message, color=None):
"""
A helper used like print() or click's secho(), tunneling all
output to sys.stdout or sys.stderr.
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr
"""
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to)
def copy_and_move_messages(from_channel, to_channel):
"""
While splitting channel and moving chosen subscribers to new channel,
old channel's messages are copied and moved to new channel.
Args:
from_channel (Channel object): move messages from channel
to_channel (Channel object): move messages to channel
"""
with BlockSave(Message, query_dict={'channel_id': to_channel.key}):
for message in Message.objects.filter(channel=from_channel, typ=15):
message.key = ''
message.channel = to_channel
message.save()
def get_domain_template(distro, libvirt_ver, **kwargs):
"""
Get a rendered Jinja2 domain template
Args:
distro(str): domain distro
libvirt_ver(int): libvirt version
kwargs(dict): args for template render
Returns:
str: rendered template
"""
env = Environment(
loader=PackageLoader('lago', 'providers/libvirt/templates'),
trim_blocks=True,
lstrip_blocks=True,
)
template_name = 'dom_template-{0}.xml.j2'.format(distro)
try:
template = env.get_template(template_name)
except TemplateNotFound:
LOGGER.debug('could not find template %s, using default', template_name)
template = env.get_template('dom_template-base.xml.j2')
return template.render(libvirt_ver=libvirt_ver, **kwargs)
def _from_dict(cls, _dict):
"""Initialize a Word object from a json dictionary."""
args = {}
if 'word' in _dict:
args['word'] = _dict.get('word')
else:
raise ValueError(
'Required property \'word\' not present in Word JSON')
if 'sounds_like' in _dict:
args['sounds_like'] = _dict.get('sounds_like')
else:
raise ValueError(
'Required property \'sounds_like\' not present in Word JSON')
if 'display_as' in _dict:
args['display_as'] = _dict.get('display_as')
else:
raise ValueError(
'Required property \'display_as\' not present in Word JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError(
'Required property \'count\' not present in Word JSON')
if 'source' in _dict:
args['source'] = _dict.get('source')
else:
raise ValueError(
'Required property \'source\' not present in Word JSON')
if 'error' in _dict:
args['error'] = [
WordError._from_dict(x) for x in (_dict.get('error'))
]
return cls(**args) | Initialize a Word object from a json dictionary. |
def merge_trd_mkt_stock_str(trd_mkt, partial_stock_str):
"""
    Merge a trade market code with a stock code string.
    :param trd_mkt: trade market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001"
"""
mkt_qot = Market.NONE
mkt = TRADE.REV_TRD_MKT_MAP[trd_mkt] if trd_mkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE
if mkt == TrdMarket.HK:
mkt_qot = Market.HK
elif mkt == TrdMarket.US:
mkt_qot = Market.US
elif mkt == TrdMarket.HKCC or mkt == TrdMarket.CN:
if partial_stock_str.startswith('6') or partial_stock_str.startswith('9'):
mkt_qot = Market.SH
else:
mkt_qot = Market.SZ
else:
raise Exception("merge_trd_mkt_stock_str: unknown trd_mkt.")
    return merge_qot_mkt_stock_str(MKT_MAP[mkt_qot], partial_stock_str) | Merge a trade market code with a stock code string.
:param trd_mkt: trade market code
:param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001"
:return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001" |
def gen_radio_list(sig_dic):
'''
    Generate the list-view HTML snippet for a RADIO field,
    one entry per item.
'''
view_zuoxiang = '''<span class="iga_pd_val">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if postinfo.extinfo['{0}'][0] == "{1}" %}} {2} {{% end %}}
'''.format(sig_dic['en'], key, dic_tmp[key])
view_zuoxiang += tmp_str
view_zuoxiang += '''</span>'''
    return view_zuoxiang | Generate the list-view HTML snippet for a RADIO field,
one entry per item.
def fileUpd(self, buffer=None, filename=None, ufilename=None, desc=None):
"""Update annotation attached file."""
CheckParent(self)
return _fitz.Annot_fileUpd(self, buffer, filename, ufilename, desc) | Update annotation attached file. |
def __add_images_to_manifest(self):
"""Add entries for py3o images into the manifest file."""
xpath_expr = "//manifest:manifest[1]"
for content_tree in self.content_trees:
# Find manifest:manifest tags.
manifest_e = content_tree.xpath(
xpath_expr,
namespaces=self.namespaces
)
if not manifest_e:
continue
for identifier in self.images.keys():
# Add a manifest:file-entry tag.
lxml.etree.SubElement(
manifest_e[0],
'{%s}file-entry' % self.namespaces['manifest'],
attrib={
'{%s}full-path' % self.namespaces['manifest']: (
PY3O_IMAGE_PREFIX + identifier
),
'{%s}media-type' % self.namespaces['manifest']: '',
}
) | Add entries for py3o images into the manifest file. |
def line_to(self, x, y):
"""Adds a line to the path from the current point
to position ``(x, y)`` in user-space coordinates.
After this call the current point will be ``(x, y)``.
If there is no current point before the call to :meth:`line_to`
this method will behave as ``context.move_to(x, y)``.
:param x: X coordinate of the end of the new line.
:param y: Y coordinate of the end of the new line.
    :type x: float
    :type y: float
"""
cairo.cairo_line_to(self._pointer, x, y)
self._check_status() | Adds a line to the path from the current point
to position ``(x, y)`` in user-space coordinates.
After this call the current point will be ``(x, y)``.
If there is no current point before the call to :meth:`line_to`
this method will behave as ``context.move_to(x, y)``.
:param x: X coordinate of the end of the new line.
:param y: Y coordinate of the end of the new line.
:type x: float
:type y: float
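A minimal usage sketch, assuming this method lives on a cairocffi-style Context; the surface size and coordinates are illustrative:

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.move_to(10, 10)   # establish a current point first
ctx.line_to(90, 90)   # the current point is now (90, 90)
ctx.stroke()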
def convert_tensor_float_to_float16(tensor):
'''
Convert tensor float to float16.
:param tensor: TensorProto object
:return tensor_float16: converted TensorProto object
Example:
::
from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16
new_tensor = convert_tensor_float_to_float16(tensor)
'''
if not isinstance(tensor, onnx_proto.TensorProto):
raise ValueError('Expected input type is an ONNX TensorProto but got %s' % type(tensor))
if tensor.data_type == onnx_proto.TensorProto.FLOAT:
tensor.data_type = onnx_proto.TensorProto.FLOAT16
# convert float_data (float type) to float16 and write to int32_data
if tensor.float_data:
int_list = _npfloat16_to_int(np.float16(tensor.float_data))
tensor.int32_data[:] = int_list
tensor.float_data[:] = []
# convert raw_data (bytes type)
if tensor.raw_data:
        # convert tensor.raw_data (bytes) to a float32 array;
        # np.frombuffer replaces the deprecated np.fromstring
        float32_list = np.frombuffer(tensor.raw_data, dtype='float32')
        # convert float32 to float16
        float16_list = np.float16(float32_list)
        # convert float16 back to bytes and write to raw_data
        # (tobytes replaces the deprecated tostring)
        tensor.raw_data = float16_list.tobytes()
return tensor | Convert tensor float to float16.
:param tensor: TensorProto object
:return tensor_float16: converted TensorProto object
Example:
::
from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16
new_tensor = convert_tensor_float_to_float16(tensor) |
def delete_files_within_dir(directory: str, filenames: List[str]) -> None:
"""
Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``.
"""
for dirpath, dirnames, fnames in os.walk(directory):
for f in fnames:
if f in filenames:
fullpath = os.path.join(dirpath, f)
log.debug("Deleting {!r}", fullpath)
os.remove(fullpath) | Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``. |
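For illustration, a hedged call; the directory and filenames are hypothetical:

# Recursively removes every file named exactly 'Thumbs.db' or '.DS_Store'
# anywhere under /tmp/build; all other files are left untouched.
delete_files_within_dir("/tmp/build", ["Thumbs.db", ".DS_Store"])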
def process_rewards(self, rewards):
"""Clips, rounds, and changes to integer type.
Args:
rewards: numpy array of raw (float) rewards.
Returns:
processed_rewards: numpy array of np.int64
"""
min_reward, max_reward = self.reward_range
# Clips at min and max reward.
rewards = np.clip(rewards, min_reward, max_reward)
# Round to (nearest) int and convert to integral type.
rewards = np.around(rewards, decimals=0).astype(np.int64)
return rewards | Clips, rounds, and changes to integer type.
Args:
rewards: numpy array of raw (float) rewards.
Returns:
processed_rewards: numpy array of np.int64 |
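The clip-then-round pipeline can be reproduced standalone; a sketch assuming reward_range == (-1, 1):

import numpy as np

raw = np.array([-2.7, -0.4, 0.4, 3.1])
clipped = np.clip(raw, -1, 1)                                 # [-1.0, -0.4, 0.4, 1.0]
processed = np.around(clipped, decimals=0).astype(np.int64)   # [-1, 0, 0, 1]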
def stix_embedding_pred(self, parent, child, ns_mapping):
"""
Predicate for recognizing inlined content in an XML; to
be used for DINGO's xml-import hook 'embedded_predicate'.
The question this predicate must answer is whether
the child should be extracted into a separate object.
The function returns either
- False (the child is not to be extracted)
- True (the child is extracted but nothing can be inferred
about what kind of object is extracted)
- a string giving some indication about the object type
(if nothing else is known: the name of the element, often the
namespace of the embedded object)
- a dictionary, of the following form::
{'id_and_revision_info' : { 'id': something/None,
'ts': something/None,
... other information you want to
record for this object for later usage,
},
'embedded_ns': False/True/some indication about object type as string}
Note: the 'parent' and 'child' arguments are XMLNodes as defined
by the Python libxml2 bindings. If you have never worked with these, have a look at
- Mike Kneller's brief intro: http://mikekneller.com/kb/python/libxml2python/part1
- the functions in django-dingos core.xml_utils module
"""
def extract_typeinfo(child):
"""
When recognizing an embedding object, we try to find out what
kind of object it is
"""
# Let's try to find a grandchild and return the namespace of this grandchild:
# This can be used as indicator for the object type that is referenced here.
# Let's further try to find a grandchild (if there is one)
grandchild = child.children
type_info = None
while grandchild is not None:
try:
grandchild_attrs = extract_attributes(grandchild, prefix_key_char='')
if 'xsi:type' in grandchild_attrs and grandchild.name=='Properties':
type_info = grandchild_attrs['xsi:type'].split(':')[0]
else:
type_info = grandchild.ns().name
break
except:
# This catches if the grandchild does not have a namespace
grandchild = grandchild.next
if type_info:
logger.debug("Found type info %s" % type_info)
return type_info
else:
logger.debug("Embedding, but did not find type info")
return True
child_attributes = extract_attributes(child, prefix_key_char='')
parent_attrs = extract_attributes(parent, prefix_key_char='')
# We start with some special cases. If we find
# OpenIOC content in a test mechanism, we use the embedded-predicate
# to pass along additional information: by setting the key 'defer_processing'
# in the 'id_and_revision_info', we tell the DINGOS xml_importer to
# return the child element unprocessed.
if parent.name=='Test_Mechanism':
if 'xsi:type' in parent_attrs:
if 'OpenIOC2010TestMechanismType' in parent_attrs['xsi:type']:
# We have an embedded OpenIOC document.
                # We extract id and revision info and tag it for deferred treatment
id_and_revision_info = OpenIOC_Importer.id_and_revision_extractor(child)
id_and_revision_info['defer_processing'] = {'processor': 'OpenIOC2010'}
logger.debug("XXX: Found OpenIOC with %s" % id_and_revision_info)
return {'embedded_ns':child.ns().name,
'id_and_revision_info':id_and_revision_info}
if parent.name=='Kill_Chain' and child.name=='Kill_Chain_Phase':
# We also extract kill-chain phases. Not sure whether that
# is the best idea, though.
logger.debug("Found killchain phase %s" % extract_typeinfo(child))
return extract_typeinfo(child)
if parent.name=='Handling' and child.name=='Marking' and parent.get_parent().name =='STIX_Header':
# We also cut out Markings, because we need an InfoObject with the Marking
# contents in order to create markings for the import.
# We seed the id_and_revision info for this object with a tag 'import_first',
# which will be read later below in the xml_import function: marking objects
# will be imported first and a marking dictionary will provide information on
# which STIX Package provided which marking.
# Markings do not carry identifiers. The xml_importer will automagically create
# an identifier derived from the identifier of the encompassing object.
return {'embedded_ns': extract_typeinfo(child),
'id_and_revision_info' : {'import_first': True}}
# Finally, the standard case: an 'id' attribute signifies
# an embedded object that we want to extract.
if ('id' in child_attributes):
return extract_typeinfo(child)
# 'object_reference' is used in Cybox as follows::
#
# (...)
# <EmailMessageObj:Attachments>
# <EmailMessageObj:File xsi:type="FileObj:FileObjectType"
# object_reference="cybox:object-3cf6a958-5c3f-11e2-a06c-0050569761d3"/>
# </EmailMessageObj:Attachments>
# (...)
#
# That is actually a reference, but we need to turn it into an '@idref'-reference.
# By treating the 'File' object-reference as an embedded object, this is done
# automatically, because the xml_importer replaces embedded content with
# '@idref'-based references.
if 'object_reference' in child_attributes:
return extract_typeinfo(child)
if child.name=='Object' and not 'idref' in child_attributes:
# Unfortunately, e.g., the example files created by MITRE from Mandiant reports
# and OpenIOCs give an identifier to an observable, but not to the
# object embedded in the observable. We, however, need an identifier for
# the object, because otherwise the whole machinery that infers an object's
# type does not work. So, if we find an object without identifier that
# is embedded in an observable with identifier, we also want to extract
# the object ... and need to derive the object identifier from the
# observable identifier. This is done automagically by the xml_import
# function itself.
return extract_typeinfo(child)
return False | Predicate for recognizing inlined content in an XML; to
be used for DINGO's xml-import hook 'embedded_predicate'.
The question this predicate must answer is whether
the child should be extracted into a separate object.
The function returns either
- False (the child is not to be extracted)
- True (the child is extracted but nothing can be inferred
about what kind of object is extracted)
- a string giving some indication about the object type
(if nothing else is known: the name of the element, often the
namespace of the embedded object)
- a dictionary, of the following form::
{'id_and_revision_info' : { 'id': something/None,
'ts': something/None,
... other information you want to
record for this object for later usage,
},
'embedded_ns': False/True/some indication about object type as string}
Note: the 'parent' and 'child' arguments are XMLNodes as defined
by the Python libxml2 bindings. If you have never worked with these, have a look at
- Mike Kneller's brief intro: http://mikekneller.com/kb/python/libxml2python/part1
- the functions in django-dingos core.xml_utils module |
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True):
""" Get the summary for the title in question
Args:
title (str): Page title to summarize
sentences (int): Number of sentences to return in summary
chars (int): Number of characters to return in summary
auto_suggest (bool): Run auto-suggest on title before \
summarizing
redirect (bool): Use page redirect on title before summarizing
Returns:
str: The summarized results of the page
Note:
Precedence for parameters: sentences then chars; if both are \
0 then the entire first section is returned """
page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect)
return page_info.summarize(sentences, chars) | Get the summary for the title in question
Args:
title (str): Page title to summarize
sentences (int): Number of sentences to return in summary
chars (int): Number of characters to return in summary
auto_suggest (bool): Run auto-suggest on title before \
summarizing
redirect (bool): Use page redirect on title before summarizing
Returns:
str: The summarized results of the page
Note:
Precedence for parameters: sentences then chars; if both are \
0 then the entire first section is returned |
def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
    # Start drawing the subplots:
    plt.subplot(311)
    title = self._get_graph_title()
    plt.title(title, loc="center")  # set the plot title
    mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
    # Add the legend
    plt.legend(mem_ins, ["Mem(MB)"], loc='upper right')  # legend position
    plt.grid()  # add a grid
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
        # Draw the mem annotation text:
        x = datetime.fromtimestamp(method_exec["mem_max_time"])
        text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
        plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
        plt.plot(x, method_exec["mem_max"], 'bo', label="point")  # plot the point
    # Subplot 2
    plt.subplot(312)
    cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
    plt.legend(cpu_ins, ["CPU(%)"], loc='upper right')  # legend position
    plt.grid()  # add a grid
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
        # Draw the cpu annotation text:
        x = datetime.fromtimestamp(method_exec["cpu_max_time"])
        text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
        plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
        plt.plot(x, method_exec["cpu_max"], 'ro', label="point")  # plot the point
    # Subplot 3
    plt.subplot(313)  # bar chart for the keypoint counts
    # Set the axis labels
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
    # Add value labels above the bars
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
    # Show the figure
    plt.show() | Plot CPU/memory usage and keypoint counts.
def best_periods(self):
"""Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
best_periods : dict
Dictionary of best periods. Dictionary keys are the unique filter
names passed to fit()
"""
for (key, model) in self.models_.items():
model.optimizer = self.optimizer
return dict((filt, model.best_period)
            for (filt, model) in self.models_.items()) | Compute the best period for each of the fitted models
Returns
-------
best_periods : dict
Dictionary of best periods. Dictionary keys are the unique filter
names passed to fit() |
def hash(value, algorithm='sha512'):
'''
.. versionadded:: 2014.7.0
Encodes a value with the specified encoder.
value
The value to be hashed.
algorithm : sha512
The algorithm to use. May be any valid algorithm supported by
hashlib.
CLI Example:
.. code-block:: bash
salt '*' random.hash 'I am a string' md5
'''
if six.PY3 and isinstance(value, six.string_types):
# Under Python 3 we must work with bytes
value = value.encode(__salt_system_encoding__)
if hasattr(hashlib, ALGORITHMS_ATTR_NAME) and algorithm in getattr(hashlib, ALGORITHMS_ATTR_NAME):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
elif hasattr(hashlib, algorithm):
hasher = hashlib.new(algorithm)
hasher.update(value)
out = hasher.hexdigest()
else:
raise SaltInvocationError('You must specify a valid algorithm.')
return out | .. versionadded:: 2014.7.0
Encodes a value with the specified encoder.
value
The value to be hashed.
algorithm : sha512
The algorithm to use. May be any valid algorithm supported by
hashlib.
CLI Example:
.. code-block:: bash
salt '*' random.hash 'I am a string' md5 |
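The result matches calling hashlib directly; a hedged check assuming the salt module is loaded and the system encoding is UTF-8 (note this module-level hash shadows the builtin):

import hashlib

digest = hash('I am a string', algorithm='md5')
assert digest == hashlib.md5('I am a string'.encode('utf-8')).hexdigest()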
def semcor_to_offset(sensekey):
"""
Converts SemCor sensekey IDs to synset offset.
>>> print semcor_to_offset('live%2:42:06::')
02614387-v
"""
synset = wn.lemma_from_key(sensekey).synset
offset = '%08d-%s' % (synset.offset, synset.pos)
return offset | Converts SemCor sensekey IDs to synset offset.
>>> print semcor_to_offset('live%2:42:06::')
02614387-v |
def status_human(self):
"""
Human readable status
:return:
* `DOWNLOADING`: the task is downloading files
* `BEING TRANSFERRED`: the task is being transferred
* `TRANSFERRED`: the task has been transferred to downloads \
directory
* `SEARCHING RESOURCES`: the task is searching resources
* `FAILED`: the task is failed
* `DELETED`: the task is deleted
* `UNKNOWN STATUS`
:rtype: str
"""
res = None
if self._deleted:
return 'DELETED'
if self.status == 1:
res = 'DOWNLOADING'
elif self.status == 2:
if self.move == 0:
res = 'BEING TRANSFERRED'
elif self.move == 1:
res = 'TRANSFERRED'
elif self.move == 2:
res = 'PARTIALLY TRANSFERRED'
elif self.status == 4:
res = 'SEARCHING RESOURCES'
elif self.status == -1:
res = 'FAILED'
if res is not None:
return res
return 'UNKNOWN STATUS' | Human readable status
:return:
* `DOWNLOADING`: the task is downloading files
* `BEING TRANSFERRED`: the task is being transferred
* `TRANSFERRED`: the task has been transferred to downloads \
directory
* `SEARCHING RESOURCES`: the task is searching resources
* `FAILED`: the task is failed
* `DELETED`: the task is deleted
* `UNKNOWN STATUS`
:rtype: str |
def show_fields(self, block=None):
"""Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
Returns:
dict: ``field:datatype`` pairs.
"""
mapping = self._mapping()
if block is None:
return mapping
elif block == "top":
blocks = set()
for key in mapping.keys():
blocks.add(key.split(".")[0])
block_map = {}
for b in blocks:
block_map[b] = "object"
else:
block_map = {}
for key, value in mapping.items():
if key.startswith(block):
block_map[key] = value
return block_map | Retrieve and return the mapping for the given metadata block.
Arguments:
block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``),
or the special values ``None`` for everything or ``"top"`` for just the
top-level fields.
**Default:** ``None``.
Returns:
dict: ``field:datatype`` pairs. |
def dataoneTypes(request):
"""Return the PyXB binding to use when handling a request."""
if is_v1_api(request):
return d1_common.types.dataoneTypes_v1_1
elif is_v2_api(request) or is_diag_api(request):
return d1_common.types.dataoneTypes_v2_0
else:
raise d1_common.types.exceptions.ServiceFailure(
0, 'Unknown version designator in URL. url="{}"'.format(request.path)
) | Return the PyXB binding to use when handling a request. |
def tzname(self, dt):
"""
http://docs.python.org/library/datetime.html#datetime.tzinfo.tzname
"""
if self.__is_daylight_time(dt):
return time.tzname[1]
else:
return time.tzname[0] | http://docs.python.org/library/datetime.html#datetime.tzinfo.tzname |
def xlim(min, max):
"""
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
"""
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfull'] = Range1d(min, max)
lim_info['xlast'] = Range1d(min, max)
tplot_opt_glob['x_range'] = [min, max]
return | This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00") |
def memoize_single_arg(f):
"""
Decorator memoizing a single-argument function
"""
memo = {}
@wraps(f)
def memoized_f(arg):
try:
return memo[arg]
except KeyError:
result = memo[arg] = f(arg)
return result
return memoized_f | Decorator memoizing a single-argument function |
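A quick usage sketch; fib is a hypothetical function used only to show the caching effect:

@memoize_single_arg
def fib(n):
    # Each distinct n is computed once, so the naive recursion runs in linear time.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

assert fib(30) == 832040   # repeated calls for any argument hit the memo dict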
def route(self, uri, *args, **kwargs):
"""Create a plugin route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn
"""
if len(args) == 0 and callable(uri):
raise RuntimeError("Cannot use the @route decorator without "
"arguments.")
kwargs.setdefault('methods', frozenset({'GET'}))
kwargs.setdefault('host', None)
kwargs.setdefault('strict_slashes', False)
kwargs.setdefault('stream', False)
kwargs.setdefault('name', None)
kwargs['with_context'] = True # This is the whole point of this plugin
def wrapper(handler_f):
nonlocal self, uri, args, kwargs
return super(Contextualize, self).route(
uri, *args, **kwargs)(handler_f)
return wrapper | Create a plugin route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn |
def variantcall_sample(data, region=None, align_bams=None, out_file=None):
"""Parallel entry point for doing genotyping of a region of a sample.
"""
if out_file is None or not os.path.exists(out_file) or not os.path.lexists(out_file):
utils.safe_makedir(os.path.dirname(out_file))
ref_file = dd.get_ref_file(data)
config = data["config"]
caller_fns = get_variantcallers()
caller_fn = caller_fns[config["algorithm"].get("variantcaller")]
if len(align_bams) == 1:
items = [data]
else:
items = multi.get_orig_items(data)
assert len(items) == len(align_bams)
assoc_files = tz.get_in(("genome_resources", "variation"), data, {})
if not assoc_files: assoc_files = {}
for bam_file in align_bams:
bam.index(bam_file, data["config"], check_timestamp=False)
out_file = caller_fn(align_bams, items, ref_file, assoc_files, region, out_file)
if region:
data["region"] = region
data["vrn_file"] = out_file
return [data] | Parallel entry point for doing genotyping of a region of a sample. |
def expanduser(self, filepath, ssh=False):
"""Replaces the user root ~ with the full path on the file system.
Works for local disks and remote servers. For remote servers, set
ssh=True."""
if ssh:
self._check_ssh()
stdin, stdout, stderr = self.ssh.exec_command("cd; pwd")
stdin.close()
remotepath = filepath.replace("~", stdout.read().split()[0])
return self._get_tramp_path(remotepath)
else:
return os.path.expanduser(filepath) | Replaces the user root ~ with the full path on the file system.
Works for local disks and remote servers. For remote servers, set
ssh=True. |
def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
      dict[str, object]: a mapping of column name to value, one per row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot retrieve values database not opened.')
if condition:
condition = ' WHERE {0:s}'.format(condition)
sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
', '.join(table_names), ', '.join(column_names), condition)
self._cursor.execute(sql_query)
# TODO: have a look at https://docs.python.org/2/library/
# sqlite3.html#sqlite3.Row.
for row in self._cursor:
yield {
column_name: row[column_index]
for column_index, column_name in enumerate(column_names)} | Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
dict[str, object]: a mapping of column name to value, one per row.
Raises:
RuntimeError: if the database is not opened. |
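A hedged usage sketch; the database object, table, columns, and condition below are illustrative, not taken from the source:

# Each yielded row is a {column_name: value} mapping.
for row in database.GetValues(
        table_names=['message_table'],
        column_names=['log_source', 'message_string'],
        condition="log_source == 'Application Error'"):
    print(row['log_source'], row['message_string'])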
def top_level(self):
"""
Print just the top level of an object, being sure to show where
it goes deeper
"""
output = {}
if isinstance(self.obj, dict):
for name, item in self.obj.items():
if isinstance(item, dict):
if item:
output[name] = StrReprWrapper('{...}')
else:
output[name] = StrReprWrapper('{}')
elif isinstance(item, list):
if item:
output[name] = StrReprWrapper('[...]')
else:
output[name] = StrReprWrapper('[]')
else:
output[name] = item
return output
else:
return self.obj | Print just the top level of an object, being sure to show where
it goes deeper |
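To illustrate the summarising behaviour, a standalone sketch; StrReprWrapper is assumed to be a small helper whose repr() is the wrapped string:

class StrReprWrapper:
    def __init__(self, text):
        self.text = text
    def __repr__(self):
        return self.text

obj = {"a": {"x": 1}, "b": [], "c": 3}
# top_level() on a wrapper around obj would return:
# {'a': {...}, 'b': [], 'c': 3}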
def _initialize_policy(self):
"""Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state.
"""
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
network = functools.partial(
self._config.network, self._config, self._batch_env.action_space)
self._network = tf.make_template('network', network)
output = self._network(
tf.zeros_like(self._batch_env.observ)[:, None],
tf.ones(len(self._batch_env)))
if output.policy.event_shape != self._batch_env.action.shape[1:]:
message = 'Policy event shape {} does not match action shape {}.'
message = message.format(
output.policy.event_shape, self._batch_env.action.shape[1:])
raise ValueError(message)
self._policy_type = type(output.policy)
is_tensor = lambda x: isinstance(x, tf.Tensor)
policy_params = tools.nested.filter(is_tensor, output.policy.parameters)
set_batch_dim = lambda x: utility.set_dimension(x, 0, len(self._batch_env))
tools.nested.map(set_batch_dim, policy_params)
if output.state is not None:
tools.nested.map(set_batch_dim, output.state)
return policy_params, output.state | Initialize the policy.
Run the policy network on dummy data to initialize its parameters for later
reuse and to analyze the policy distribution. Initializes the attributes
`self._network` and `self._policy_type`.
Raises:
ValueError: Invalid policy distribution.
Returns:
Parameters of the policy distribution and policy state. |
def __get_blob_dimensions(self, chunk_dim):
""" Sets the blob dimmentions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB.
"""
#Taking the size into consideration, but avoiding having multiple blobs within a single time bin.
if self.selection_shape[self.freq_axis] > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
freq_axis_size = self.selection_shape[self.freq_axis]
# while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
# freq_axis_size /= 2
time_axis_size = 1
else:
freq_axis_size = self.selection_shape[self.freq_axis]
time_axis_size = np.min([chunk_dim[self.time_axis] * MAX_BLOB_MB * chunk_dim[self.freq_axis] / freq_axis_size, self.selection_shape[self.time_axis]])
blob_dim = (int(time_axis_size), 1, freq_axis_size)
    return blob_dim | Sets the blob dimensions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB. |
async def wait_until_serving(self) -> None:
"""
Await until the ``Endpoint`` is ready to receive events.
"""
await asyncio.gather(
self._receiving_loop_running.wait(),
self._internal_loop_running.wait(),
loop=self.event_loop
) | Await until the ``Endpoint`` is ready to receive events. |
def is_valid(self):
"""Validates the username and password in the form."""
form = super(AuthenticateForm, self).is_valid()
for f, error in self.errors.items():
if f != "__all__":
self.fields[f].widget.attrs.update({"class": "error", "placeholder": ", ".join(list(error))})
else:
errors = list(error)
if "This account is inactive." in errors:
message = "Intranet access restricted"
else:
message = "Invalid password"
self.fields["password"].widget.attrs.update({"class": "error", "placeholder": message})
return form | Validates the username and password in the form. |
def delete(method, hmc, uri, uri_parms, logon_required):
"""Operation: Delete <resource>."""
try:
resource = hmc.lookup_by_uri(uri)
except KeyError:
raise InvalidResourceError(method, uri)
resource.manager.remove(resource.oid) | Operation: Delete <resource>. |
def cmd_link_list(self):
'''list links'''
print("%u links" % len(self.mpstate.mav_master))
for i in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[i]
print("%u: %s" % (i, conn.address)) | list links |
def plot(self, value=None, pixel=None):
"""
Plot the ROI
"""
# DEPRECATED
import ugali.utils.plotting
map_roi = np.array(hp.UNSEEN \
* np.ones(hp.nside2npix(self.config.params['coords']['nside_pixel'])))
if value is None:
#map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat)
map_roi[self.pixels] = 1
map_roi[self.pixels_annulus] = 0
map_roi[self.pixels_target] = 2
elif value is not None and pixel is None:
map_roi[self.pixels] = value
elif value is not None and pixel is not None:
map_roi[pixel] = value
else:
logger.error("Can't parse input")
ugali.utils.plotting.zoomedHealpixMap('Region of Interest',
map_roi,
self.lon, self.lat,
self.config.params['coords']['roi_radius']) | Plot the ROI |
def staff_member(view_func):
"""Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~leonardo.exceptions.NotAuthenticated` exception if the user is not
signed-in.
"""
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_staff:
return view_func(request, *args, **kwargs)
raise PermissionDenied(_("You haven't permissions to do this action."))
return dec | Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~leonardo.exceptions.NotAuthenticated` exception if the user is not
signed-in. |
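A hedged usage sketch; the view below is hypothetical:

from django.http import HttpResponse

@staff_member
def admin_dashboard(request):
    # Reached only when request.user.is_staff is True; otherwise
    # PermissionDenied is raised before the view body runs.
    return HttpResponse("staff only")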
def _get_available_choices(self, queryset, value):
"""
get possible choices for selection
"""
item = queryset.filter(pk=value).first()
if item:
try:
pk = getattr(item, self.chained_model_field + "_id")
filter = {self.chained_model_field: pk}
except AttributeError:
try: # maybe m2m?
pks = getattr(item, self.chained_model_field).all().values_list('pk', flat=True)
filter = {self.chained_model_field + "__in": pks}
except AttributeError:
try: # maybe a set?
pks = getattr(item, self.chained_model_field + "_set").all().values_list('pk', flat=True)
filter = {self.chained_model_field + "__in": pks}
except AttributeError: # give up
filter = {}
filtered = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**filter).distinct())
if self.sort:
sort_results(filtered)
else:
# invalid value for queryset
filtered = []
return filtered | get possible choices for selection |
def update_abbreviations(apps, schema_editor):
"""
Migrate to new FR committee abbreviations
"""
# Get model managers
Group = apps.get_model("representatives", "Group")
# Abbreviation mapping
amap = {
u'SenComCult': u'Culture',
u'SenComEco': u'Économie',
u'SenComDef': u'Défense',
u'SenComEU': u'Europe',
u'SenComSoc': u'Social',
u'SenComFin': u'Finances',
u'SenComLois': u'Lois',
u'SenComDevD': u'',
u'SenComAppL': u'',
u'AnComCult': u'Culture',
u'AnComEco': u'Économie',
u'AnComEtrg': u'Étranger',
u'AnComDef': u'Défense',
u'AnComEU': u'Europe',
u'AnComSoc': u'Social',
u'AnComFin': u'Finances',
u'AnComLois': u'Lois',
u'AnComDevD': u'',
u'AnComImmu': u'',
}
    for old, new in amap.items():  # items() works under both Python 2 and 3
for g in Group.objects.filter(abbreviation=old):
g.abbreviation = new
g.save() | Migrate to new FR committee abbreviations |
def to_end_tag(self, tag_func):
"""
Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class ToEndTagNode(template.Node):
def __init__(self):
end_name = "end%s" % tag_func.__name__
self.nodelist = parser.parse((end_name,))
parser.delete_first_token()
def render(self, context):
args = (self.nodelist.render(context), context, token)
return tag_func(*args[:tag_func.__code__.co_argcount])
return ToEndTagNode()
return self.tag(tag_wrapper) | Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token. |
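A hedged sketch of defining such a tag; register is assumed to be the template-Library-like object exposing this method:

@register.to_end_tag
def upper(parsed, context, token):
    # 'parsed' is the already-rendered content between {% upper %} and {% endupper %}.
    return parsed.upper()

# Template usage: {% upper %}{{ name }}{% endupper %} renders NAME.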
def QueryHowDoI(Query, num_answers, full_text, window:sg.Window):
'''
Kicks off a subprocess to send the 'Query' to HowDoI
Prints the result, which in this program will route to a gooeyGUI window
:param Query: text english question to ask the HowDoI web engine
:return: nothing
'''
howdoi_command = HOW_DO_I_COMMAND
full_text_option = ' -a' if full_text else ''
t = subprocess.Popen(howdoi_command + ' \"'+ Query + '\" -n ' + str(num_answers)+full_text_option, stdout=subprocess.PIPE)
(output, err) = t.communicate()
window.Element('_OUTPUT_').Update('{:^88}'.format(Query.rstrip()), append=True)
window.Element('_OUTPUT_').Update('_'*60, append=True)
window.Element('_OUTPUT_').Update(output.decode("utf-8"), append=True)
exit_code = t.wait() | Kicks off a subprocess to send the 'Query' to HowDoI
Prints the result, which in this program will route to a gooeyGUI window
:param Query: text english question to ask the HowDoI web engine
:return: nothing |
def main():
"""
main
"""
arguments = IArguments(__doc__)
content = open(arguments.filepath).read()
open(arguments.filepath + ".bak", "w").write(content)
try:
newcontent = transliterate(content)
write_newcontent(arguments.filepath, newcontent)
except UnicodeEncodeError as ex:
console(str(ex), color="red")
newcontent = forceascii(content)
write_newcontent(arguments.filepath, newcontent) | main |
def _rename_full_name(self, full_name, other_trajectory, used_runs=None, new_run_idx=None):
"""Renames a full name based on the wildcards and a particular run"""
split_name = full_name.split('.')
for idx, name in enumerate(split_name):
if name in other_trajectory._reversed_wildcards:
run_indices, wildcards = other_trajectory._reversed_wildcards[name]
if new_run_idx is None:
# We can safely take the first index of the index list that matches
run_idx = None
for run_jdx in run_indices:
if run_jdx in used_runs:
run_idx = used_runs[run_jdx]
break
elif run_jdx == -1:
run_idx = -1
break
if run_idx is None:
raise RuntimeError('You shall not pass!')
else:
run_idx = new_run_idx
new_name = self.f_wildcard(wildcards[0], run_idx)
split_name[idx] = new_name
full_name = '.'.join(split_name)
return full_name | Renames a full name based on the wildcards and a particular run |
def get_dialog(self):
"""Return FormDialog instance"""
dialog = self.parent()
while not isinstance(dialog, QDialog):
dialog = dialog.parent()
return dialog | Return FormDialog instance |
def layout_circle(self):
'''Position vertices evenly around a circle.'''
n = self.num_vertices()
t = np.linspace(0, 2*np.pi, n+1)[:n]
return np.column_stack((np.cos(t), np.sin(t))) | Position vertices evenly around a circle. |
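The placement is easy to check standalone; for four vertices the points land on the unit-circle axes:

import numpy as np

t = np.linspace(0, 2 * np.pi, 5)[:4]             # [0, pi/2, pi, 3*pi/2]
coords = np.column_stack((np.cos(t), np.sin(t)))
# coords is approximately [[1, 0], [0, 1], [-1, 0], [0, -1]]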
def validate(raw_schema, target=None, **kwargs):
"""
Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema.
"""
schema = schema_validator(raw_schema, **kwargs)
if target is not None:
validate_object(target, schema=schema, **kwargs) | Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema. |
async def sendPhoto(self, chat_id, photo,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
- string: ``file_id`` for a photo existing on Telegram servers
- string: HTTP URL of a photo from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (filename, file-like object). If the filename contains
non-ASCII characters and you are using Python 2.7, make sure the
filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return await self._api_request_with_file('sendPhoto', _rectify(p), 'photo', photo) | See: https://core.telegram.org/bots/api#sendphoto
:param photo:
- string: ``file_id`` for a photo existing on Telegram servers
- string: HTTP URL of a photo from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (filename, file-like object). If the filename contains
non-ASCII characters and you are using Python 2.7, make sure the
filename is a unicode string. |