def dispatch(self, test=False): # pylint: disable=too-many-branches
"""
Send configuration to satellites
:return: None
"""
if not self.new_to_dispatch:
raise DispatcherError("Dispatcher cannot dispatch, "
"because no configuration is prepared!")
if self.first_dispatch_done:
raise DispatcherError("Dispatcher cannot dispatch, "
"because the configuration is still dispatched!")
if self.dispatch_ok:
logger.info("Dispatching is already done and ok...")
return
logger.info("Trying to send configuration to the satellites...")
self.dispatch_ok = True
# todo: the 3 loops hereunder may be factorized
for link in self.arbiters:
# If not me and a spare arbiter...
if link == self.arbiter_link:
# I exclude myself from the dispatching, I have my configuration ;)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.spare:
# Do not dispatch to a master arbiter!
continue
if link.configuration_sent:
logger.debug("Arbiter %s already sent!", link.name)
continue
if not link.reachable:
logger.debug("Arbiter %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the arbiter %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
# Now that the spare arbiter has a configuration, tell him it must not run,
# because I'm not dead ;)
link.do_not_run()
for link in self.schedulers:
if link.configuration_sent:
logger.debug("Scheduler %s already sent!", link.name)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.reachable:
logger.debug("Scheduler %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the scheduler %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
for link in self.satellites:
if link.configuration_sent:
logger.debug("%s %s already sent!", link.type, link.name)
continue
if not link.active:
# I exclude the daemons that are not active
continue
if not link.reachable:
logger.warning("%s %s is not reachable to receive its configuration",
link.type, link.name)
continue
logger.info("Sending configuration to the %s %s", link.type, link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
if self.dispatch_ok:
# Newly prepared configuration got dispatched correctly
self.new_to_dispatch = False
self.first_dispatch_done = True
def exhaust_stream(f):
"""Helper decorator for methods that exhaust the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, "exhaust", None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f)
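# Illustrative use of the decorator (class and method names are assumptions,
# not from the original source): the wrapped method may stop reading early,
# and exhaust_stream drains whatever is left, via the stream's own exhaust()
# hook when available or by reading 64 KiB chunks until EOF.
class UploadHandler(object):
    @exhaust_stream
    def handle(self, stream, content_length):
        # Only peek at the first bytes; the decorator exhausts the rest.
        return stream.read(16)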
def execute_command_with_path_in_process(command, path, shell=False, cwd=None, logger=None):
"""Executes a specific command in a separate process with a path as argument.
:param command: the command to be executed
:param path: the path as first argument to the shell command
:param bool shell: Whether to use a shell
:param str cwd: The working directory of the command
:param logger: optional logger instance which can be handed from other module
:return: True if the process was started, False if the OS raised an error
"""
if logger is None:
logger = _logger
logger.debug("Opening path with command: {0} {1}".format(command, path))
# This splits the command in a manner so that the command gets called in a separate shell and thus
# does not lock the window.
args = shlex.split('{0} "{1}"'.format(command, path))
try:
subprocess.Popen(args, shell=shell, cwd=cwd)
return True
except OSError as e:
logger.error('The operating system raised an error: {}'.format(e))
return False
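# Hypothetical call (command and path are examples, not from the source):
# the viewer starts in its own process and success is reported through the
# boolean return value rather than an exception.
if execute_command_with_path_in_process('xdg-open', '/tmp/report.pdf'):
    print('viewer started')
else:
    print('could not start viewer')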
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
parameter. The response will have name preceded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError("Couldn't retrieve parameter "
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def send_facebook(self, token):
"""
Tells the server which Facebook account this client uses.
After sending, the server takes some time to
get the data from Facebook.
Seems to be broken in recent versions of the game.
"""
self.send_struct('<B%iB' % len(token), 81, *map(ord, token))
self.facebook_token = token
def cmd(send, _, args):
"""Returns a list of admins.
V = Verified (authed to NickServ), U = Unverified.
Syntax: {command}
"""
adminlist = []
for admin in args['db'].query(Permissions).order_by(Permissions.nick).all():
if admin.registered:
adminlist.append("%s (V)" % admin.nick)
else:
adminlist.append("%s (U)" % admin.nick)
send(", ".join(adminlist), target=args['nick'])
def encode(char_data, encoding='utf-8'):
"""
Encode the parameter as a byte string.
:param char_data:
:rtype: bytes
"""
if type(char_data) is unicode:
return char_data.encode(encoding, 'replace')
else:
return char_data
def normalizeGlyphRightMargin(value):
"""
Normalizes glyph right margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph right margin must be an :ref:`type-int-float`, "
"not %s." % type(value).__name__)
return value
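# Expected behaviour, derived directly from the checks above:
normalizeGlyphRightMargin(12.5)   # returns 12.5
normalizeGlyphRightMargin(None)   # returns None
normalizeGlyphRightMargin("10")   # raises TypeError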
def _approximate_eigenvalues(A, tol, maxiter, symmetric=None,
initial_guess=None):
"""Approximate eigenvalues.
Used by approximate_spectral_radius and condest.
Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
and eigenvalues of the Hessenberg matrix H, respectively, and V is the
Krylov space. breakdown_flag denotes whether Lanczos/Arnoldi suffered
breakdown. E is therefore the approximate eigenvalues of A.
To obtain approximate eigenvectors of A, compute V*W.
"""
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A) # A could be dense or sparse, or something weird
# Choose tolerance for deciding if break-down has occurred
t = A.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
breakdown = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
breakdown_flag = False
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
maxiter = min(A.shape[0], maxiter)
if initial_guess is None:
v0 = sp.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
else:
v0 = initial_guess
v0 /= norm(v0)
# Important to type H based on v0, so that a real nonsymmetric matrix, can
# have an imaginary initial guess for its Arnoldi Krylov space
H = np.zeros((maxiter+1, maxiter),
dtype=np.find_common_type([v0.dtype, A.dtype], []))
V = [v0]
beta = 0.0
for j in range(maxiter):
w = A * V[-1]
if symmetric:
if j >= 1:
H[j-1, j] = beta
w -= beta * V[-2]
alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
H[j, j] = alpha
w -= alpha * V[-1] # axpy(V[-1],w,-alpha)
beta = norm(w)
H[j+1, j] = beta
if (H[j+1, j] < breakdown):
breakdown_flag = True
break
w /= beta
V.append(w)
V = V[-2:] # retain only last two vectors
else:
# orthogonalize against Vs
for i, v in enumerate(V):
H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
w = w - H[i, j]*v
H[j+1, j] = norm(w)
if (H[j+1, j] < breakdown):
breakdown_flag = True
if H[j+1, j] != 0.0:
w = w/H[j+1, j]
V.append(w)
break
w = w/H[j+1, j]
V.append(w)
# if upper 2x2 block of Hessenberg matrix H is almost symmetric,
# and the user has not explicitly specified symmetric=False,
# then switch to symmetric Lanczos algorithm
# if symmetric is not False and j == 1:
# if abs(H[1,0] - H[0,1]) < 1e-12:
# #print "using symmetric mode"
# symmetric = True
# V = V[1:]
# H[1,0] = H[0,1]
# beta = H[2,1]
# print "Approximated spectral radius in %d iterations" % (j + 1)
from scipy.linalg import eig
Eigs, Vects = eig(H[:j+1, :j+1], left=False, right=True)
return (Vects, Eigs, H, V, breakdown_flag)
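# Minimal usage sketch (assumed, not from the original source): estimate the
# spectral radius of a sparse matrix from the Ritz values returned above,
# the way approximate_spectral_radius would use this helper.
import numpy as np
import scipy.sparse as sparse

A = sparse.random(100, 100, density=0.05, format='csr')
Vects, Eigs, H, V, breakdown = _approximate_eigenvalues(A, 1e-8, maxiter=15)
rho = np.max(np.abs(Eigs))  # approximate spectral radius of A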
def count_matrix(self):
# TODO: does this belong here or to the BHMM sampler, or in a subclass containing HMM with data?
"""Compute the transition count matrix from hidden state trajectory.
Returns
-------
C : numpy.array with shape (nstates,nstates)
C[i,j] is the number of transitions observed from state i to state j
Raises
------
RuntimeError
A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it.
Examples
--------
"""
if self.hidden_state_trajectories is None:
raise RuntimeError('HMM model does not have a hidden state trajectory.')
C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self._nstates)
return C.toarray()
def main_base_ramp(self) -> "Ramp":
""" Returns the Ramp instance of the closest main-ramp to start location. Look in game_info.py for more information """
if hasattr(self, "cached_main_base_ramp"):
return self.cached_main_base_ramp
self.cached_main_base_ramp = min(
{ramp for ramp in self.game_info.map_ramps if len(ramp.upper2_for_ramp_wall) == 2},
key=(lambda r: self.start_location.distance_to(r.top_center)),
)
return self.cached_main_base_ramp
def _validate_logical(self, rule, field, value):
""" {'allowed': ('allof', 'anyof', 'noneof', 'oneof')} """
if not isinstance(value, Sequence):
self._error(field, errors.BAD_TYPE)
return
validator = self._get_child_validator(
document_crumb=rule, allow_unknown=False,
schema=self.target_validator.validation_rules)
for constraints in value:
_hash = (mapping_hash({'turing': constraints}),
mapping_hash(self.target_validator.types_mapping))
if _hash in self.target_validator._valid_schemas:
continue
validator(constraints, normalize=False)
if validator._errors:
self._error(validator._errors)
else:
self.target_validator._valid_schemas.add(_hash)
def set(self, key, val):
"""
Return a new PMap with key and val inserted.
>>> m1 = m(a=1, b=2)
>>> m2 = m1.set('a', 3)
>>> m3 = m1.set('c' ,4)
>>> m1
pmap({'a': 1, 'b': 2})
>>> m2
pmap({'a': 3, 'b': 2})
>>> m3
pmap({'a': 1, 'c': 4, 'b': 2})
"""
return self.evolver().set(key, val).persistent()
def _build_flavors(p, flist, qualdecl=None):
"""
Build and return a dictionary defining the flavors from the
flist argument.
This function maps from the input keyword definitions for the flavors
(ex. EnableOverride) to the PyWBEM internal definitions
(ex. overridable)
Uses the qualdecl argument as a basis if it exists. This is to define
qualifier flavors if qualifier declarations exist.
This applies the values from the qualifierDecl to the qualifier
flavor list.
This function and the defaultflavor function ensure that all
flavors are defined in the created dictionary that is returned. This
is important because the PyWBEM classes allow `None` as a flavor
definition.
"""
flavors = {}
if ('disableoverride' in flist and 'enableoverride' in flist) \
or \
('restricted' in flist and 'tosubclass' in flist): # noqa: E125
raise MOFParseError(parser_token=p, msg="Conflicting flavors are "
"invalid")
if qualdecl is not None:
flavors = {'overridable': qualdecl.overridable,
'translatable': qualdecl.translatable,
'tosubclass': qualdecl.tosubclass,
'toinstance': qualdecl.toinstance}
if 'disableoverride' in flist:
flavors['overridable'] = False
if 'enableoverride' in flist:
flavors['overridable'] = True
if 'translatable' in flist:
flavors['translatable'] = True
if 'restricted' in flist:
flavors['tosubclass'] = False
if 'tosubclass' in flist:
flavors['tosubclass'] = True
if 'toinstance' in flist:
flavors['toinstance'] = True
# issue #193 ks 5/16 removed tosubclass & set toinstance.
return flavors
def validate(self):
"""
Perform validation check on properties.
"""
if not self.api_token or not self.api_token_secret:
raise ImproperlyConfigured("'api_token' and 'api_token_secret' are required for authentication.")
if self.response_type not in ["json", "pson", "xml", "debug", None]:
raise ImproperlyConfigured("'%s' is an invalid response_type" % self.response_type)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of an unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinite observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def publish(self, message, tag=b''):
"""
Publish `message` with specified `tag`.
:param message: message data
:type message: str
:param tag: message tag
:type tag: str
"""
self.send(tag + b'\0' + message)
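# Illustrative call (assumes `sock` is an instance of this publisher class):
# the wire format is simply the tag, a NUL byte, then the message body.
sock.publish(b'hello world', tag=b'weather')  # sends b'weather\x00hello world'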
def update(self):
"""Update sensors stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the dedicated lib
stats = []
# Get the temperature
try:
temperature = self.__set_type(self.glancesgrabsensors.get('temperature_core'),
'temperature_core')
except Exception as e:
logger.error("Cannot grab sensors temperatures (%s)" % e)
else:
# Append temperature
stats.extend(temperature)
# Get the FAN speed
try:
fan_speed = self.__set_type(self.glancesgrabsensors.get('fan_speed'),
'fan_speed')
except Exception as e:
logger.error("Cannot grab FAN speed (%s)" % e)
else:
# Append FAN speed
stats.extend(fan_speed)
# Update HDDtemp stats
try:
hddtemp = self.__set_type(self.hddtemp_plugin.update(),
'temperature_hdd')
except Exception as e:
logger.error("Cannot grab HDD temperature (%s)" % e)
else:
# Append HDD temperature
stats.extend(hddtemp)
# Update batteries stats
try:
batpercent = self.__set_type(self.batpercent_plugin.update(),
'battery')
except Exception as e:
logger.error("Cannot grab battery percent (%s)" % e)
else:
# Append Batteries %
stats.extend(batpercent)
elif self.input_method == 'snmp':
# Update stats using SNMP
# No standard:
# http://www.net-snmp.org/wiki/index.php/Net-SNMP_and_lm-sensors_on_Ubuntu_10.04
pass
# Set the alias for each stat
for stat in stats:
alias = self.has_alias(stat["label"].lower())
if alias:
stat["label"] = alias
# Update the stats
self.stats = stats
return self.stats
def transformer_en_de_512(dataset_name=None, src_vocab=None, tgt_vocab=None, pretrained=False,
ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs):
r"""Transformer pretrained model.
Embedding size is 512, and hidden layer size is 2048.
Parameters
----------
dataset_name : str or None, default None
src_vocab : gluonnlp.Vocab or None, default None
tgt_vocab : gluonnlp.Vocab or None, default None
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
Returns
-------
gluon.Block, gluonnlp.Vocab, gluonnlp.Vocab
"""
predefined_args = {'num_units': 512,
'hidden_size': 2048,
'dropout': 0.1,
'epsilon': 0.1,
'num_layers': 6,
'num_heads': 8,
'scaled': True,
'share_embed': True,
'embed_size': 512,
'tie_weights': True,
'embed_initializer': None}
mutable_args = frozenset(['num_units', 'hidden_size', 'dropout', 'epsilon', 'num_layers',
'num_heads', 'scaled'])
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
'Cannot override predefined model settings.'
predefined_args.update(kwargs)
encoder, decoder = get_transformer_encoder_decoder(units=predefined_args['num_units'],
hidden_size=predefined_args['hidden_size'],
dropout=predefined_args['dropout'],
num_layers=predefined_args['num_layers'],
num_heads=predefined_args['num_heads'],
max_src_length=530,
max_tgt_length=549,
scaled=predefined_args['scaled'])
return _get_transformer_model(NMTModel, 'transformer_en_de_512', dataset_name,
src_vocab, tgt_vocab, encoder, decoder,
predefined_args['share_embed'], predefined_args['embed_size'],
predefined_args['tie_weights'],
predefined_args['embed_initializer'], pretrained, ctx, root)
def format(logger,
show_successful=True,
show_errors=True,
show_traceback=True):
"""
Prints a report of the actions that were logged by the given Logger.
The report contains a list of successful actions, as well as the full
error message on failed actions.
:type logger: Logger
:param logger: The logger that recorded what happened in the queue.
:rtype: string
:return: A string summarizing the status of every performed task.
"""
output = []
# Print failed actions.
errors = logger.get_aborted_actions()
if show_errors and errors:
output += _underline('Failed actions:')
for log in logger.get_aborted_logs():
if show_traceback:
output.append(log.get_name() + ':')
output.append(log.get_error())
else:
output.append(log.get_name() + ': ' + log.get_error(False))
output.append('')
# Print successful actions.
if show_successful:
output += _underline('Successful actions:')
for log in logger.get_succeeded_logs():
output.append(log.get_name())
output.append('')
return '\n'.join(output).strip()
def get_root_path(self, language):
"""
Get root path to pass to the LSP servers.
This can be the current project path or the output of
getcwd_or_home (except for Python, see below).
"""
path = None
# Get path of the current project
if self.main and self.main.projects:
path = self.main.projects.get_active_project_path()
# If there's no project, use the output of getcwd_or_home.
if not path:
# We can't use getcwd_or_home for Python because if it
# returns home and you have a lot of Python files on it
# then computing Rope completions takes a long time
# and blocks the PyLS server.
# Instead we use an empty directory inside our config one,
# just like we did for Rope in Spyder 3.
if language == 'python':
path = get_conf_path('lsp_root_path')
if not osp.exists(path):
os.mkdir(path)
else:
path = getcwd_or_home()
return path
def _call_process(self, method, *args, **kwargs):
"""Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently as None
is realized as non-existent
:param kwargs:
is a dict of keyword arguments.
This function accepts the same optional keyword arguments
as execute().
``Examples``::
git.rev_list('master', max_count=10, header=True)
:return: Same as ``execute``"""
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
_kwargs = dict()
for kwarg in execute_kwargs:
try:
_kwargs[kwarg] = kwargs.pop(kwarg)
except KeyError:
pass
# Prepare the argument list
opt_args = self.transform_kwargs(**kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
args = opt_args + ext_args
call = ["git", dashify(method)]
call.extend(args)
return self.execute(call, **_kwargs)
def get_cpu_info(self) -> str:
'''Show device CPU information.'''
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'cat', '/proc/cpuinfo')
return output
def get_coeffs(expr, expand=False, epsilon=0.):
"""Create a dictionary with all Operator terms of the expression
(understood as a sum) as keys and their coefficients as values.
The returned object is a defaultdict that return 0. if a term/key
doesn't exist.
Args:
expr: The operator expression to get all coefficients from.
expand: Whether to expand the expression distributively.
epsilon: If non-zero, drop all Operators with coefficients that have
absolute value less than epsilon.
Returns:
dict: A dictionary ``{op1: coeff1, op2: coeff2, ...}``
"""
if expand:
expr = expr.expand()
ret = defaultdict(int)
operands = expr.operands if isinstance(expr, OperatorPlus) else [expr]
for e in operands:
c, t = _coeff_term(e)
try:
if abs(complex(c)) < epsilon:
continue
except TypeError:
pass
ret[t] += c
return ret
def _send_packet(
self, ip, port, packet,
update_timestamp=True, acknowledge_packet=True
):
"""
Send a packet
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:param packet: Packet to be transmitted
:type packet: APPMessage
:param update_timestamp: Should update timestamp to current
:type update_timestamp: bool
:param acknowledge_packet: Should packet get acknowledged
:type acknowledge_packet: bool
:rtype: None
"""
if acknowledge_packet:
packet.header.sequence_number = self._send_seq_num
self._send_seq_num += 1
packet.header.device_id = self._device_id
try:
packed = packet.pack(update_timestamp=update_timestamp)
except ValueError:
self.exception("Failed to pack packet")
return
self._send(ip, port, packed)
# TODO: add to wait for ack list
if acknowledge_packet:
with self._seq_ack_lock:
self._seq_ack.add(packet.header.sequence_number)
self._to_ack.put(
(time.time() + self._retransmit_timeout, 1, (ip, port), packet)
)
self.debug(u"Send: {}".format(packet))
def get_model(cls, name=None, status=ENABLED):
"""
Returns model instance of plugin point or plugin, depending from which
class this methos is called.
Example::
plugin_model_instance = MyPlugin.get_model()
plugin_model_instance = MyPluginPoint.get_model('plugin-name')
plugin_point_model_instance = MyPluginPoint.get_model()
"""
ppath = cls.get_pythonpath()
if is_plugin_point(cls):
if name is not None:
kwargs = {}
if status is not None:
kwargs['status'] = status
return Plugin.objects.get(point__pythonpath=ppath,
name=name, **kwargs)
else:
return PluginPointModel.objects.get(pythonpath=ppath)
else:
return Plugin.objects.get(pythonpath=ppath)
def coerce(cls, arg):
"""Given an arg, return the appropriate value given the class."""
try:
return cls(arg).value
except (ValueError, TypeError):
raise InvalidParameterDatatype("%s coerce error" % (cls.__name__,))
def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim2,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
fit_method : string
Which method to fit the model with
intervals : boolean
Whether to output prediction intervals or not
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
data1 = self.data_original.iloc[:-h+t,:]
data2 = self.data_original.iloc[-h+t:,:]
x = DynReg(formula=self.formula, data=data1)
if fit_once is False:
x.fit(printer=False, fit_method=fit_method)
if t == 0:
if fit_once is True:
x.fit(printer=False, fit_method=fit_method)
saved_lvs = x.latent_variables
predictions = x.predict(1, oos_data=data2, intervals=intervals)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(h=1, oos_data=data2, intervals=intervals)])
predictions.rename(columns={0:self.y_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions
def extend(self, *args):
"""
Extend a given object with all the properties in
passed-in object(s).
"""
args = list(args)
for i in args:
self.obj.update(i)
return self._wrap(self.obj)
def add(self, fact):
"""Create a VALID token and send it to all children."""
token = Token.valid(fact)
MATCHER.debug("<BusNode> added %r", token)
for child in self.children:
child.callback(token)
def widgetEdited(self, event=None, val=None, action='entry', skipDups=True):
""" A general method for firing any applicable triggers when
a value has been set. This is meant to be easily callable from any
part of this class (or its subclasses), so that it can be called
as soon as need be (immed. on click?). This is smart enough to
be called multiple times, itself handling the removal of any/all
duplicate successive calls (unless skipDups is False). If val is
None, it will use the GUI entry's current value via choice.get().
See teal.py for a description of action.
"""
# be as lightweight as possible if obj doesn't care about this stuff
if not self._editedCallbackObj and not self._flagNonDefaultVals:
return
# get the current value
curVal = val # take this first, if it is given
if curVal is None:
curVal = self.choice.get()
# do any flagging
self.flagThisPar(curVal, False)
# see if this is a duplicate successive call for the same value
if skipDups and curVal==self._lastWidgetEditedVal: return
# pull trigger
if not self._editedCallbackObj: return
self._editedCallbackObj.edited(self.paramInfo.scope,
self.paramInfo.name,
self.previousValue, curVal,
action)
# for our duplicate checker
self._lastWidgetEditedVal = curVal
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(IPMISensorCollector, self).get_default_config()
config.update({
'bin': '/usr/bin/ipmitool',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'path': 'ipmi.sensors',
'thresholds': False,
'delimiter': '.'
})
return config
def curated(name):
"""Download and return a path to a sample that is curated by the PyAV developers.
Data is handled by :func:`cached_download`.
"""
return cached_download('https://docs.mikeboers.com/pyav/samples/' + name,
os.path.join('pyav-curated', name.replace('/', os.path.sep)))
def power(self, n):
"""The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer.
"""
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n))
def tar(filename, dirs=[], gzip=False):
""" Create a tar-file or a tar.gz at location: filename.
params:
gzip: if True - gzip the file, default = False
dirs: dirs to be tared
returns a 3-tuple with returncode (integer), terminal output (string)
and the new filename.
"""
if gzip:
cmd = 'tar czvf %s ' % filename
else:
cmd = 'tar cvf %s ' % filename
if not isinstance(dirs, list):
dirs = [dirs]
cmd += ' '.join(str(x) for x in dirs)
retcode, output = sh(cmd)
return (retcode, output, filename)
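# Hypothetical invocation (paths are examples; relies on the same sh()
# helper that tar() uses to shell out):
retcode, output, name = tar('/tmp/etc-backup.tar.gz', dirs=['/etc'], gzip=True)
if retcode != 0:
    print('tar failed: %s' % output)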
def filter_by_cols(self, cols, ID=None):
"""
Keep only Measurements in corresponding columns.
"""
rows = to_list(cols)
fil = lambda x: x in rows
applyto = {k: self._positions[k][1] for k in self.keys()}
if ID is None:
ID = self.ID + '.filtered_by_cols'
return self.filter(fil, applyto=applyto, ID=ID)
def strip_empty_lines_forward(self, content, i):
"""
Skip over empty lines
:param content: parsed text
:param i: index of the line to start from
:return: index of the first non-empty line
"""
while i < len(content):
line = content[i].strip(' \r\n\t\f')
if line != '':
break
self.debug_print_strip_msg(i, content[i])
i += 1 # Strip an empty line
return i
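# Sketch of the expected behaviour (assumes `parser` is an instance of the
# enclosing class): starting at index 1, the blank lines are skipped and the
# index of the first non-empty line is returned.
content = ['Header', '', '   ', 'Body']
i = parser.strip_empty_lines_forward(content, 1)  # i == 3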
def from_pdf(
cls, pdf, filename, width=288, height=432, dpi=203, font_path=None,
center_of_pixel=False, use_bindings=False
):
"""
Filename is 1-8 alphanumeric characters to identify the GRF in ZPL.
Dimensions and DPI are for a typical 4"x6" shipping label.
E.g. 432 points / 72 points in an inch / 203 dpi = 6 inches
Using center of pixel will improve barcode quality but may decrease
the quality of some text.
use_bindings=False:
- Uses subprocess.Popen
- Forks so there is a memory spike
- Easier to setup - only needs the gs binary
use_bindings=True:
- Uses python-ghostscript
- Doesn't fork so should use less memory
- python-ghostscript is a bit buggy
- May be harder to setup - even if you have updated the gs binary
there may still be old libgs* files on your system
"""
# Most arguments below are based on what CUPS uses
setpagedevice = [
'/.HWMargins[0.000000 0.000000 0.000000 0.000000]',
'/Margins[0 0]'
]
cmd = [
'gs',
'-dQUIET',
'-dPARANOIDSAFER',
'-dNOPAUSE',
'-dBATCH',
'-dNOINTERPOLATE',
'-sDEVICE=pngmono',
'-dAdvanceDistance=1000',
'-r%s' % int(dpi),
'-dDEVICEWIDTHPOINTS=%s' % int(width),
'-dDEVICEHEIGHTPOINTS=%s' % int(height),
'-dFIXEDMEDIA',
'-dPDFFitPage',
'-c',
'<<%s>>setpagedevice' % ' '.join(setpagedevice)
]
if center_of_pixel:
cmd += ['0 .setfilladjust']
if font_path and os.path.exists(font_path):
cmd += ['-I' + font_path]
if use_bindings:
import ghostscript
# python-ghostscript doesn't like reading/writing from
# stdin/stdout so we need to use temp files
with tempfile.NamedTemporaryFile() as in_file, \
tempfile.NamedTemporaryFile() as out_file:
in_file.write(pdf)
in_file.flush()
# Ghostscript seems to be sensitive to argument order
cmd[13:13] += [
'-sOutputFile=%s' % out_file.name
]
cmd += [
'-f', in_file.name
]
try:
ghostscript.Ghostscript(*[c.encode('ascii') for c in cmd])
except Exception as e:
raise GRFException(e)
pngs = out_file.read()
else:
from subprocess import PIPE, Popen
# Ghostscript seems to be sensitive to argument order
cmd[13:13] += [
'-sstdout=%stderr',
'-sOutputFile=%stdout',
]
cmd += [
'-f', '-'
]
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
pngs, stderr = p.communicate(pdf)
if stderr:
raise GRFException(stderr)
# This is what PIL uses to identify PNGs
png_start = b'\211PNG\r\n\032\n'
grfs = []
for png in pngs.split(png_start)[1:]:
grfs.append(cls.from_image(png_start + png, filename))
return grfs
def get_people(self, user_alias=None):
"""
获取用户信息
:param user_alias: 用户ID
:return:
"""
user_alias = user_alias or self.api.user_alias
content = self.api.req(API_PEOPLE_HOME % user_alias).content
xml = self.api.to_xml(re.sub(b'<br />', b'\n', content))
try:
xml_user = xml.xpath('//*[@id="profile"]')
if not xml_user:
return None
else:
xml_user = xml_user[0]
avatar = first(xml_user.xpath('.//img/@src'))
city = first(xml_user.xpath('.//div[@class="user-info"]/a/text()'))
city_url = first(xml_user.xpath('.//div[@class="user-info"]/a/@href'))
text_created_at = xml_user.xpath('.//div[@class="pl"]/text()')[1]
created_at = re.match(r'.+(?=加入)', text_created_at.strip()).group()
xml_intro = first(xml.xpath('//*[@id="intro_display"]'))
intro = xml_intro.xpath('string(.)') if xml_intro is not None else None
nickname = first(xml.xpath('//*[@id="db-usr-profile"]//h1/text()'), '').strip() or None
signature = first(xml.xpath('//*[@id="display"]/text()'))
xml_contact_count = xml.xpath('//*[@id="friend"]/h2')[0]
contact_count = int(re.search(r'成员(\d+)', xml_contact_count.xpath('string(.)')).groups()[0])
text_rev_contact_count = xml.xpath('//p[@class="rev-link"]/a/text()')[0]
rev_contact_count = int(re.search(r'(\d+)人关注', text_rev_contact_count.strip()).groups()[0])
return {
'alias': user_alias,
'url': API_PEOPLE_HOME % user_alias,
'avatar': avatar,
'city': city,
'city_url': city_url,
'created_at': created_at,
'intro': intro,
'nickname': nickname,
'signature': signature,
'contact_count': contact_count,
'rev_contact_count': rev_contact_count,
}
except Exception as e:
self.api.logger.exception('parse people meta error: %s' % e)
def _strOrDate(st):
'''internal'''
if isinstance(st, string_types):
return st
elif isinstance(st, datetime):
return st.strftime('%Y%m%d')
raise PyEXception('Not a date: %s', str(st))
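# Behaviour implied by the branches above:
from datetime import datetime
_strOrDate('20190115')             # -> '20190115'
_strOrDate(datetime(2019, 1, 15))  # -> '20190115'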
def touch(self, connection=None):
"""
Mark this update as complete.
IMPORTANT, If the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table)
VALUES (%s, %s)
ON DUPLICATE KEY UPDATE
update_id = VALUES(update_id)
""".format(marker_table=self.marker_table),
(self.update_id, self.table)
)
# make sure update is properly marked
assert self.exists(connection)
def get_or_create_ec2_key_pair(name=None, verbose=1):
"""
Creates and saves an EC2 key pair to a local PEM file.
"""
verbose = int(verbose)
name = name or env.vm_ec2_keypair_name
pem_path = 'roles/%s/%s.pem' % (env.ROLE, name)
conn = get_ec2_connection()
kp = conn.get_key_pair(name)
if kp:
print('Key pair %s already exists.' % name)
else:
# Note, we only get the private key during creation.
# If we don't save it here, it's lost forever.
kp = conn.create_key_pair(name)
open(pem_path, 'wb').write(kp.material)
os.system('chmod 600 %s' % pem_path)
print('Key pair %s created.' % name)
#return kp
return pem_path
def get(self, hash, account="*", max_transactions=100, min_confirmations=6, raw=False):
"""
Args:
hash: can be a bitcoin address or a transaction id. If it's a
bitcoin address it will return a list of transactions up to
``max_transactions`` a list of unspents with confirmed
transactions greater or equal to ``min_confirmantions``
account (Optional[str]): used when using the bitcoind. bitcoind
does not provide an easy way to retrieve transactions for a
single address. By using account we can retrieve transactions
for addresses in a specific account
Returns:
transaction
"""
if len(hash) < 64:
txs = self._service.list_transactions(hash, account=account, max_transactions=max_transactions)
unspents = self._service.list_unspents(hash, min_confirmations=min_confirmations)
return {'transactions': txs, 'unspents': unspents}
else:
return self._service.get_transaction(hash, raw=raw)
def cache_method(func=None, prefix=''):
"""
Cache result of function execution into the `self` object (mostly useful in models).
Calculate cache key based on `args` and `kwargs` of the function (except `self`).
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
cache_key_prefix = prefix or '_cache_{}'.format(func.__name__)
cache_key = get_cache_key(cache_key_prefix, *args, **kwargs)
if not hasattr(self, cache_key):
setattr(self, cache_key, func(self, *args, **kwargs))  # pass args through so the call matches the cache key
return getattr(self, cache_key)
return wrapper
if func is None:
return decorator
else:
return decorator(func)
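# Minimal usage sketch (class and method names are illustrative; get_cache_key
# is assumed to be the helper this module already imports):
class Invoice(object):
    def __init__(self, lines):
        self.lines = lines

    @cache_method
    def total(self):
        return sum(self.lines)  # computed once, then cached on the instance

invoice = Invoice([10, 20, 30])
invoice.total()  # computes and stores the result on the instance
invoice.total()  # returns the cached value without recomputing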
def get_schema(self):
"""Return the schema."""
path = os.path.join(self._get_schema_folder(), self._name + ".json")
with open(path, "rb") as file:
schema = json.loads(file.read().decode("UTF-8"))
return schema
def export(self, path, session):
"""See `Module.export`."""
def variables_saver(variables_path):
if self._saver:
self._saver.save(
session, variables_path,
write_meta_graph=False,
write_state=False)
self._spec._export(path, variables_saver)
def draw_markers(self):
"""Draw all created markers on the TimeLine Canvas"""
self._canvas_markers.clear()
for marker in self._markers.values():
self.create_marker(marker["category"], marker["start"], marker["finish"], marker)
def hide_routemap_holder_route_map_content_set_dampening_half_life(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
dampening = ET.SubElement(set, "dampening")
half_life = ET.SubElement(dampening, "half-life")
half_life.text = kwargs.pop('half_life')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def reinforce(self, **kwargs):
"""
Reinforces the grid and calculates grid expansion costs.
See :meth:`edisgo.flex_opt.reinforce_grid` for more information.
"""
results = reinforce_grid(
self, max_while_iterations=kwargs.get(
'max_while_iterations', 10),
copy_graph=kwargs.get('copy_graph', False),
timesteps_pfa=kwargs.get('timesteps_pfa', None),
combined_analysis=kwargs.get('combined_analysis', False))
# add measure to Results object
if not kwargs.get('copy_graph', False):
self.network.results.measures = 'grid_expansion'
return results
def _send_data(self, data, start_offset, file_len):
"""Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
"""
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def kana2alphabet(text):
"""Convert Hiragana to hepburn-style alphabets
Parameters
----------
text : str
Hiragana string.
Return
------
str
Hepburn-style alphabets string.
Examples
--------
>>> print(jaconv.kana2alphabet('まみさん'))
mamisan
"""
text = text.replace('きゃ', 'kya').replace('きゅ', 'kyu').replace('きょ', 'kyo')
text = text.replace('ぎゃ', 'gya').replace('ぎゅ', 'gyu').replace('ぎょ', 'gyo')
text = text.replace('しゃ', 'sha').replace('しゅ', 'shu').replace('しょ', 'sho')
text = text.replace('じゃ', 'ja').replace('じゅ', 'ju').replace('じょ', 'jo')
text = text.replace('ちゃ', 'cha').replace('ちゅ', 'chu').replace('ちょ', 'cho')
text = text.replace('にゃ', 'nya').replace('にゅ', 'nyu').replace('にょ', 'nyo')
text = text.replace('ふぁ', 'fa').replace('ふぃ', 'fi').replace('ふぇ', 'fe')
text = text.replace('ふぉ', 'fo')
text = text.replace('ひゃ', 'hya').replace('ひゅ', 'hyu').replace('ひょ', 'hyo')
text = text.replace('みゃ', 'mya').replace('みゅ', 'myu').replace('みょ', 'myo')
text = text.replace('りゃ', 'rya').replace('りゅ', 'ryu').replace('りょ', 'ryo')
text = text.replace('びゃ', 'bya').replace('びゅ', 'byu').replace('びょ', 'byo')
text = text.replace('ぴゃ', 'pya').replace('ぴゅ', 'pyu').replace('ぴょ', 'pyo')
text = text.replace('が', 'ga').replace('ぎ', 'gi').replace('ぐ', 'gu')
text = text.replace('げ', 'ge').replace('ご', 'go').replace('ざ', 'za')
text = text.replace('じ', 'ji').replace('ず', 'zu').replace('ぜ', 'ze')
text = text.replace('ぞ', 'zo').replace('だ', 'da').replace('ぢ', 'ji')
text = text.replace('づ', 'zu').replace('で', 'de').replace('ど', 'do')
text = text.replace('ば', 'ba').replace('び', 'bi').replace('ぶ', 'bu')
text = text.replace('べ', 'be').replace('ぼ', 'bo').replace('ぱ', 'pa')
text = text.replace('ぴ', 'pi').replace('ぷ', 'pu').replace('ぺ', 'pe')
text = text.replace('ぽ', 'po')
text = text.replace('か', 'ka').replace('き', 'ki').replace('く', 'ku')
text = text.replace('け', 'ke').replace('こ', 'ko').replace('さ', 'sa')
text = text.replace('し', 'shi').replace('す', 'su').replace('せ', 'se')
text = text.replace('そ', 'so').replace('た', 'ta').replace('ち', 'chi')
text = text.replace('つ', 'tsu').replace('て', 'te').replace('と', 'to')
text = text.replace('な', 'na').replace('に', 'ni').replace('ぬ', 'nu')
text = text.replace('ね', 'ne').replace('の', 'no').replace('は', 'ha')
text = text.replace('ひ', 'hi').replace('ふ', 'fu').replace('へ', 'he')
text = text.replace('ほ', 'ho').replace('ま', 'ma').replace('み', 'mi')
text = text.replace('む', 'mu').replace('め', 'me').replace('も', 'mo')
text = text.replace('ら', 'ra').replace('り', 'ri').replace('る', 'ru')
text = text.replace('れ', 're').replace('ろ', 'ro')
text = text.replace('や', 'ya').replace('ゆ', 'yu').replace('よ', 'yo')
text = text.replace('わ', 'wa').replace('ゐ', 'wi').replace('を', 'wo')
text = text.replace('ゑ', 'we')
text = _convert(text, KANA2HEP)
while 'っ' in text:
text = list(text)
tsu_pos = text.index('っ')
if len(text) <= tsu_pos + 1:
return ''.join(text[:-1]) + 'xtsu'
if tsu_pos == 0:
text[tsu_pos] = 'xtsu'
else:
text[tsu_pos] = text[tsu_pos + 1]
text = ''.join(text)
return text
def bipartition(seq):
"""Return a list of bipartitions for a sequence.
Args:
a (Iterable): The sequence to partition.
Returns:
list[tuple[tuple]]: A list of tuples containing each of the two
partitions.
Example:
>>> bipartition((1,2,3))
[((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]
"""
return [(tuple(seq[i] for i in part0_idx),
tuple(seq[j] for j in part1_idx))
for part0_idx, part1_idx in bipartition_indices(len(seq))]
def get_all_fields(self, arr):
"""
Returns a list containing this struct's fields and all the fields of
its ancestors. Used during validation.
"""
for k, v in self.fields.items():
arr.append(v)
if self.extends:
parent = self.contract.get(self.extends)
if parent:
return parent.get_all_fields(arr)
return arr
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt]
def delist(target):
''' for any "list" found, replace with a single entry if the list has exactly one entry '''
result = target
if type(target) is dict:
for key in target:
target[key] = delist(target[key])
if type(target) is list:
if len(target)==0:
result = None
elif len(target)==1:
result = delist(target[0])
else:
result = [delist(e) for e in target]
return result | for any "list" found, replace with a single entry if the list has exactly one entry |
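A short illustration of the behaviour described above: empty lists collapse to None, single-element lists collapse to their element, longer lists are processed recursively.
nested = {'a': ['only'], 'b': [1, 2], 'c': []}
print(delist(nested))  # {'a': 'only', 'b': [1, 2], 'c': None}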
def add_oxidation_state_by_guess(self, **kwargs):
"""
Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses()
"""
oxid_guess = self.composition.oxi_state_guesses(**kwargs)
oxid_guess = oxid_guess or \
[dict([(e.symbol, 0) for e in self.composition])]
self.add_oxidation_state_by_element(oxid_guess[0]) | Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses() |
def dictlist_replace(dict_list: Iterable[Dict], key: str, value: Any) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, change
(in place) ``d[key]`` to ``value``.
"""
for d in dict_list:
d[key] = value | Process an iterable of dictionaries. For each dictionary ``d``, change
(in place) ``d[key]`` to ``value``. |
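A minimal usage sketch for the in-place update above:
rows = [{'id': 1, 'status': 'new'}, {'id': 2, 'status': 'new'}]
dictlist_replace(rows, 'status', 'done')
print(rows)  # [{'id': 1, 'status': 'done'}, {'id': 2, 'status': 'done'}]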
def run(host='0.0.0.0', port=5000, reload=True, debug=True):
""" Run development server """
from werkzeug.serving import run_simple
app = bootstrap.get_app()
return run_simple(
hostname=host,
port=port,
application=app,
use_reloader=reload,
use_debugger=debug,
) | Run development server |
def get_documents(self):
"""Return all the parsed ``Documents`` in the database.
:rtype: A list of all ``Documents`` in the database ordered by name.
"""
return self.session.query(Document).order_by(Document.name).all() | Return all the parsed ``Documents`` in the database.
:rtype: A list of all ``Documents`` in the database ordered by name. |
def display_widgets(self):
"""
Displays all the widgets associated with this Container.
Should be called when the widgets need to be "re-packed/gridded".
"""
# All widgets are removed and then recreated to ensure the order they
# were created is the order they are displayed.
for child in self.children:
if child.displayable:
# forget the widget
if self.layout != "grid":
child.tk.pack_forget()
else:
child.tk.grid_forget()
# display the widget
if child.visible:
if self.layout != "grid":
self._pack_widget(child)
else:
self._grid_widget(child) | Displays all the widgets associated with this Container.
Should be called when the widgets need to be "re-packed/gridded". |
def remove(self, docid):
"""
Remove a document from the database.
"""
docid = int(docid)
self.store.executeSQL(self.removeSQL, (docid,)) | Remove a document from the database. |
def _id(self):
"""Handle identifiers and reserverd keywords."""
result = ''
while self.char is not None and (self.char.isalnum() or self.char == '_'):
result += self.char
self.advance()
token = RESERVED_KEYWORDS.get(result, Token(Nature.ID, result))
return token | Handle identifiers and reserved keywords.
def surface_state(num_lat=90,
num_lon=None,
water_depth=10.,
T0=12.,
T2=-40.):
"""Sets up a state variable dictionary for a surface model
(e.g. :class:`~climlab.model.ebm.EBM`) with a uniform slab ocean depth.
The domain is either 1D (latitude) or 2D (latitude, longitude)
depending on whether the input argument num_lon is supplied.
Returns a single state variable `Ts`, the temperature of the surface
mixed layer (slab ocean).
The temperature is initialized to a smooth equator-to-pole shape given by
.. math::
T(\phi) = T_0 + T_2 P_2(\sin\phi)
where :math:`\phi` is latitude, and :math:`P_2` is the second Legendre
polynomial :class:`~climlab.utils.legendre.P2`.
**Function-call arguments** \n
:param int num_lat: number of latitude points [default: 90]
:param int num_lon: (optional) number of longitude points [default: None]
:param float water_depth: depth of the slab ocean in meters [default: 10.]
:param float T0: global-mean initial temperature in :math:`^{\circ} \\textrm{C}` [default: 12.]
:param float T2: 2nd Legendre coefficient for equator-to-pole gradient in
initial temperature, in :math:`^{\circ} \\textrm{C}` [default: -40.]
:returns: dictionary with temperature
:class:`~climlab.domain.field.Field`
for surface mixed layer ``Ts``
:rtype: dict
:Example:
::
>>> from climlab.domain import initial
>>> import numpy as np
>>> T_dict = initial.surface_state(num_lat=36)
>>> print np.squeeze(T_dict['Ts'])
[-27.88584094 -26.97777479 -25.18923361 -22.57456133 -19.21320344
-15.20729309 -10.67854785 -5.76457135 -0.61467228 4.61467228
9.76457135 14.67854785 19.20729309 23.21320344 26.57456133
29.18923361 30.97777479 31.88584094 31.88584094 30.97777479
29.18923361 26.57456133 23.21320344 19.20729309 14.67854785
9.76457135 4.61467228 -0.61467228 -5.76457135 -10.67854785
-15.20729309 -19.21320344 -22.57456133 -25.18923361 -26.97777479
-27.88584094]
"""
if num_lon is None:
sfc = domain.zonal_mean_surface(num_lat=num_lat,
water_depth=water_depth)
else:
sfc = domain.surface_2D(num_lat=num_lat,
num_lon=num_lon,
water_depth=water_depth)
if 'lon' in sfc.axes:
lon, lat = np.meshgrid(sfc.axes['lon'].points, sfc.axes['lat'].points)
else:
lat = sfc.axes['lat'].points
sinphi = np.sin(np.deg2rad(lat))
initial = T0 + T2 * legendre.P2(sinphi)
Ts = Field(initial, domain=sfc)
#if num_lon is None:
# Ts = Field(initial, domain=sfc)
#else:
# Ts = Field([[initial for k in range(num_lon)]], domain=sfc)
state = AttrDict()
state['Ts'] = Ts
return state | Sets up a state variable dictionary for a surface model
(e.g. :class:`~climlab.model.ebm.EBM`) with a uniform slab ocean depth.
The domain is either 1D (latitude) or 2D (latitude, longitude)
depending on whether the input argument num_lon is supplied.
Returns a single state variable `Ts`, the temperature of the surface
mixed layer (slab ocean).
The temperature is initialized to a smooth equator-to-pole shape given by
.. math::
T(\phi) = T_0 + T_2 P_2(\sin\phi)
where :math:`\phi` is latitude, and :math:`P_2` is the second Legendre
polynomial :class:`~climlab.utils.legendre.P2`.
**Function-call arguments** \n
:param int num_lat: number of latitude points [default: 90]
:param int num_lon: (optional) number of longitude points [default: None]
:param float water_depth: depth of the slab ocean in meters [default: 10.]
:param float T0: global-mean initial temperature in :math:`^{\circ} \\textrm{C}` [default: 12.]
:param float T2: 2nd Legendre coefficient for equator-to-pole gradient in
initial temperature, in :math:`^{\circ} \\textrm{C}` [default: -40.]
:returns: dictionary with temperature
:class:`~climlab.domain.field.Field`
for surface mixed layer ``Ts``
:rtype: dict
:Example:
::
>>> from climlab.domain import initial
>>> import numpy as np
>>> T_dict = initial.surface_state(num_lat=36)
>>> print np.squeeze(T_dict['Ts'])
[-27.88584094 -26.97777479 -25.18923361 -22.57456133 -19.21320344
-15.20729309 -10.67854785 -5.76457135 -0.61467228 4.61467228
9.76457135 14.67854785 19.20729309 23.21320344 26.57456133
29.18923361 30.97777479 31.88584094 31.88584094 30.97777479
29.18923361 26.57456133 23.21320344 19.20729309 14.67854785
9.76457135 4.61467228 -0.61467228 -5.76457135 -10.67854785
-15.20729309 -19.21320344 -22.57456133 -25.18923361 -26.97777479
-27.88584094] |
def shift(func, *args, **kwargs):
"""This function is basically a beefed up lambda x: func(x, *args, **kwargs)
:func:`shift` comes in handy when it is used in a pipeline with a function that
needs the passed value as its first argument.
:param func: a function
:param args: objects
:param kwargs: keywords
>>> def div(x, y): return float(x) / y
This is equivalent to div(42, 2)::
>>> shift(div, 2)(42)
21.0
which is different from div(2, 42)::
>>> from functools import partial
>>> partial(div, 2)(42)
0.047619047619047616
"""
@wraps(func)
def wrapped(x):
return func(x, *args, **kwargs)
return wrapped | This function is basically a beefed up lambda x: func(x, *args, **kwargs)
:func:`shift` comes in handy when it is used in a pipeline with a function that
needs the passed value as its first argument.
:param func: a function
:param args: objects
:param kwargs: keywords
>>> def div(x, y): return float(x) / y
This is equivalent to div(42, 2)::
>>> shift(div, 2)(42)
21.0
which is different from div(2, 42)::
>>> from functools import partial
>>> partial(div, 2)(42)
0.047619047619047616 |
def task(**kwargs):
"""A function task decorator used in place of ``@celery_app.task``."""
def wrapper(wrapped):
def callback(scanner, name, obj):
celery_app = scanner.config.registry.celery_app
celery_app.task(**kwargs)(obj)
venusian.attach(wrapped, callback)
return wrapped
return wrapper | A function task decorator used in place of ``@celery_app.task``. |
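A hedged usage sketch for the decorator above: the kwargs are forwarded to celery_app.task when a venusian scan runs against a config whose registry exposes celery_app. The task name below is illustrative only.
@task(bind=True, name='myapp.tasks.send_email')
def send_email(self, recipient):
    # Body omitted; actual Celery registration happens later, during config.scan().
    return recipient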
def import_keyset(self, keyset):
"""Imports a RFC 7517 keyset using the standard JSON format.
:param keyset: The RFC 7517 representation of a JOSE Keyset.
"""
try:
jwkset = json_decode(keyset)
except Exception: # pylint: disable=broad-except
raise InvalidJWKValue()
if 'keys' not in jwkset:
raise InvalidJWKValue()
for k, v in iteritems(jwkset):
if k == 'keys':
for jwk in v:
self['keys'].add(JWK(**jwk))
else:
self[k] = v | Imports a RFC 7517 keyset using the standard JSON format.
:param keyset: The RFC 7517 representation of a JOSE Keyset. |
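A hedged usage sketch, assuming this method belongs to a jwcrypto-style JWKSet; the symmetric key material below is illustrative only.
from jwcrypto.jwk import JWKSet

keyset_json = '{"keys": [{"kty": "oct", "k": "c2VjcmV0LWtleS1ieXRlcw"}]}'
ks = JWKSet()
ks.import_keyset(keyset_json)
print(len(ks['keys']))  # 1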
def integratedAutocorrelationTime(A_n, B_n=None, fast=False, mintime=3):
"""Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency
"""
g = statisticalInefficiency(A_n, B_n, fast, mintime)
tau = (g - 1.0) / 2.0
return tau | Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency |
def consulta(self, endereco, primeiro=False,
uf=None, localidade=None, tipo=None, numero=None):
"""Consulta site e retorna lista de resultados"""
if uf is None:
url = 'consultaEnderecoAction.do'
data = {
'relaxation': endereco.encode('ISO-8859-1'),
'TipoCep': 'ALL',
'semelhante': 'N',
'cfm': 1,
'Metodo': 'listaLogradouro',
'TipoConsulta': 'relaxation',
'StartRow': '1',
'EndRow': '10'
}
else:
url = 'consultaLogradouroAction.do'
data = {
'Logradouro': endereco.encode('ISO-8859-1'),
'UF': uf,
'TIPO': tipo,
'Localidade': localidade.encode('ISO-8859-1'),
'Numero': numero,
'cfm': 1,
'Metodo': 'listaLogradouro',
'TipoConsulta': 'logradouro',
'StartRow': '1',
'EndRow': '10'
}
h = self._url_open(url, data)
html = h.read()
if primeiro:
return self.detalhe()
else:
return self._parse_tabela(html) | Queries the site and returns a list of results
def _update_task(self, task):
"""
Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object.
"""
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, 'service_class', '')
self._set_lane_data() | Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object. |
def _set_overlay_service_policy(self, v, load=False):
"""
Setter method for overlay_service_policy, mapped from YANG variable /overlay_gateway/overlay_service_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_service_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_service_policy() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=overlay_service_policy.overlay_service_policy, is_container='container', presence=False, yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map to overlay-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """overlay_service_policy must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=overlay_service_policy.overlay_service_policy, is_container='container', presence=False, yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Attach Overlay policy Map to overlay-gateway'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""",
})
self.__overlay_service_policy = t
if hasattr(self, '_set'):
self._set() | Setter method for overlay_service_policy, mapped from YANG variable /overlay_gateway/overlay_service_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_service_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_service_policy() directly. |
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since the secondary learner's meta-feature generator can be any method, e.g. predict or predict_proba,
this internal method accepts any such string. Just make sure the secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out | Process using secondary learner meta-feature generator
Since the secondary learner's meta-feature generator can be any method, e.g. predict or predict_proba,
this internal method accepts any such string. Just make sure the secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner |
def extract_input(pipe_def=None, pipe_generator=None):
"""Extract inputs required by a pipe"""
if pipe_def:
pyinput = gen_input(pipe_def)
elif pipe_generator:
pyinput = pipe_generator(Context(describe_input=True))
else:
raise Exception('Must supply at least one kwarg!')
return sorted(list(pyinput)) | Extract inputs required by a pipe |
def validate_units(self):
"""Ensure that wavelenth and flux units belong to the
correct classes.
Raises
------
TypeError
Wavelength unit is not `~pysynphot.units.WaveUnits` or
flux unit is not `~pysynphot.units.FluxUnits`.
"""
if (not isinstance(self.waveunits, units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit" % self.waveunits)
if (not isinstance(self.fluxunits, units.FluxUnits)):
raise TypeError("%s is not a valid FluxUnit" % self.fluxunits) | Ensure that wavelenth and flux units belong to the
correct classes.
Raises
------
TypeError
Wavelength unit is not `~pysynphot.units.WaveUnits` or
flux unit is not `~pysynphot.units.FluxUnits`. |
def clear(self):
"""
Removes all data from the buffer.
"""
self.io.seek(0)
self.io.truncate()
for item in self.monitors:
item[2] = 0 | Removes all data from the buffer. |
def propertyWidgetMap(self):
"""
Returns the mapping for this page between its widgets and its
scaffold property.
:return {<projex.scaffold.Property>: <QtGui.QWidget>, ..}
"""
out = {}
scaffold = self.scaffold()
# initialize the scaffold properties
for widget in self.findChildren(QtGui.QWidget):
propname = unwrapVariant(widget.property('propertyName'))
if not propname: continue
prop = scaffold.property(propname)
if not prop: continue
out[prop] = widget
return out | Returns the mapping for this page between its widgets and its
scaffold property.
:return {<projex.scaffold.Property>: <QtGui.QWidget>, ..} |
def directory_open(self, path, filter_p, flags):
"""Opens a directory in the guest and creates a :py:class:`IGuestDirectory`
object that can be used for further operations.
This method follows symbolic links by default at the moment; this
may change in the future.
in path of type str
Path to the directory to open. Guest path style.
in filter_p of type str
Optional directory listing filter to apply. This uses the DOS/NT
style wildcard characters '?' and '*'.
in flags of type :class:`DirectoryOpenFlag`
Zero or more :py:class:`DirectoryOpenFlag` flags.
return directory of type :class:`IGuestDirectory`
:py:class:`IGuestDirectory` object containing the opened directory.
raises :class:`VBoxErrorObjectNotFound`
Directory to open was not found.
raises :class:`VBoxErrorIprtError`
Error while opening the directory.
raises :class:`VBoxErrorMaximumReached`
The maximum of concurrent guest directories has been reached.
"""
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(filter_p, basestring):
raise TypeError("filter_p can only be an instance of type basestring")
if not isinstance(flags, list):
raise TypeError("flags can only be an instance of type list")
for a in flags[:10]:
if not isinstance(a, DirectoryOpenFlag):
raise TypeError(
"array can only contain objects of type DirectoryOpenFlag")
directory = self._call("directoryOpen",
in_p=[path, filter_p, flags])
directory = IGuestDirectory(directory)
return directory | Opens a directory in the guest and creates a :py:class:`IGuestDirectory`
object that can be used for further operations.
This method follows symbolic links by default at the moment; this
may change in the future.
in path of type str
Path to the directory to open. Guest path style.
in filter_p of type str
Optional directory listing filter to apply. This uses the DOS/NT
style wildcard characters '?' and '*'.
in flags of type :class:`DirectoryOpenFlag`
Zero or more :py:class:`DirectoryOpenFlag` flags.
return directory of type :class:`IGuestDirectory`
:py:class:`IGuestDirectory` object containing the opened directory.
raises :class:`VBoxErrorObjectNotFound`
Directory to open was not found.
raises :class:`VBoxErrorIprtError`
Error while opening the directory.
raises :class:`VBoxErrorMaximumReached`
The maximum of concurrent guest directories has been reached. |
def parse_date(datestring):
"""Attepmts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object.
"""
datestring = str(datestring).strip()
if not datestring[0].isdigit():
raise ParseError()
if 'W' in datestring.upper():
try:
datestring = datestring[:-1] + str(int(datestring[-1:]) -1)
except:
pass
for regex, pattern in DATE_FORMATS:
if regex.match(datestring):
found = regex.search(datestring).groupdict()
dt = datetime.utcnow().strptime(found['matched'], pattern)
if 'fraction' in found and found['fraction'] is not None:
dt = dt.replace(microsecond=int(found['fraction'][1:]))
if 'timezone' in found and found['timezone'] is not None:
dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
return dt
return parse_time(datestring) | Attempts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object. |
def makedirs(path, ignore_extsep=False):
"""Makes all directories required for given path; returns true if successful
and false otherwise.
**Examples**:
::
auxly.filesys.makedirs("bar/baz")
"""
if not ignore_extsep and op.basename(path).find(os.extsep) > -1:
path = op.dirname(path)
try:
os.makedirs(path)
except:
return False
return True | Makes all directories required for given path; returns true if successful
and false otherwise.
**Examples**:
::
auxly.filesys.makedirs("bar/baz") |
def cross_origin(app, *args, **kwargs):
"""
This function is the decorator which is used to wrap a Sanic route with.
In the simplest case, simply use the default parameters to allow all
origins in what is the most permissive configuration. If this method
modifies state or performs authentication which may be brute-forced, you
should add some degree of protection, such as Cross-Site Request Forgery
protection.
:param origins:
The origin, or list of origins to allow requests from.
The origin(s) may be regular expressions, case-sensitive strings,
or else an asterisk
Default : '*'
:type origins: list, string or regex
:param methods:
The method or list of methods which the allowed origins are allowed to
access for non-simple requests.
Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
:type methods: list or string
:param expose_headers:
The header or list which are safe to expose to the API of a CORS API
specification.
Default : None
:type expose_headers: list or string
:param allow_headers:
The header or list of header field names which can be used when this
resource is accessed by allowed origins. The header(s) may be regular
expressions, case-sensitive strings, or else an asterisk.
Default : '*', allow all headers
:type allow_headers: list, string or regex
:param supports_credentials:
Allows users to make authenticated requests. If true, injects the
`Access-Control-Allow-Credentials` header in responses. This allows
cookies and credentials to be submitted across domains.
:note: This option cannot be used in conjunction with a '*' origin
Default : False
:type supports_credentials: bool
:param max_age:
The maximum time for which this CORS request may be cached. This value
is set as the `Access-Control-Max-Age` header.
Default : None
:type max_age: timedelta, integer, string or None
:param send_wildcard: If True, and the origins parameter is `*`, a wildcard
`Access-Control-Allow-Origin` header is sent, rather than the
request's `Origin` header.
Default : False
:type send_wildcard: bool
:param vary_header:
If True, the header Vary: Origin will be returned as per the W3
implementation guidelines.
Setting this header when the `Access-Control-Allow-Origin` is
dynamically generated (e.g. when there is more than one allowed
origin, and an Origin other than '*' is returned) informs CDNs and other
caches that the CORS headers are dynamic, and cannot be cached.
If False, the Vary header will never be injected or altered.
Default : True
:type vary_header: bool
:param automatic_options:
Only applies to the `cross_origin` decorator. If True, Sanic-CORS will
override Sanic's default OPTIONS handling to return CORS headers for
OPTIONS requests.
Default : True
:type automatic_options: bool
"""
_options = kwargs
_real_decorator = cors.decorate(app, *args, run_middleware=False, with_context=False, **kwargs)
def wrapped_decorator(f):
spf = SanicPluginsFramework(app) # get the singleton from the app
try:
plugin = spf.register_plugin(cors, skip_reg=True)
except ValueError as e:
# this is normal, if this plugin has been registered previously
assert e.args and len(e.args) > 1
plugin = e.args[1]
context = cors.get_context_from_spf(spf)
log = context.log
log(logging.DEBUG, "Enabled {:s} for cross_origin using options: {}".format(str(f), str(_options)))
return _real_decorator(f)
return wrapped_decorator | This function is the decorator which is used to wrap a Sanic route with.
In the simplest case, simply use the default parameters to allow all
origins in what is the most permissive configuration. If this method
modifies state or performs authentication which may be brute-forced, you
should add some degree of protection, such as Cross-Site Request Forgery
protection.
:param origins:
The origin, or list of origins to allow requests from.
The origin(s) may be regular expressions, case-sensitive strings,
or else an asterisk
Default : '*'
:type origins: list, string or regex
:param methods:
The method or list of methods which the allowed origins are allowed to
access for non-simple requests.
Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
:type methods: list or string
:param expose_headers:
The header or list which are safe to expose to the API of a CORS API
specification.
Default : None
:type expose_headers: list or string
:param allow_headers:
The header or list of header field names which can be used when this
resource is accessed by allowed origins. The header(s) may be regular
expressions, case-sensitive strings, or else an asterisk.
Default : '*', allow all headers
:type allow_headers: list, string or regex
:param supports_credentials:
Allows users to make authenticated requests. If true, injects the
`Access-Control-Allow-Credentials` header in responses. This allows
cookies and credentials to be submitted across domains.
:note: This option cannot be used in conjunction with a '*' origin
Default : False
:type supports_credentials: bool
:param max_age:
The maximum time for which this CORS request may be cached. This value
is set as the `Access-Control-Max-Age` header.
Default : None
:type max_age: timedelta, integer, string or None
:param send_wildcard: If True, and the origins parameter is `*`, a wildcard
`Access-Control-Allow-Origin` header is sent, rather than the
request's `Origin` header.
Default : False
:type send_wildcard: bool
:param vary_header:
If True, the header Vary: Origin will be returned as per the W3
implementation guidelines.
Setting this header when the `Access-Control-Allow-Origin` is
dynamically generated (e.g. when there is more than one allowed
origin, and an Origin other than '*' is returned) informs CDNs and other
caches that the CORS headers are dynamic, and cannot be cached.
If False, the Vary header will never be injected or altered.
Default : True
:type vary_header: bool
:param automatic_options:
Only applies to the `cross_origin` decorator. If True, Sanic-CORS will
override Sanic's default OPTIONS handling to return CORS headers for
OPTIONS requests.
Default : True
:type automatic_options: bool |
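A hedged usage sketch for the decorator above, assuming Sanic and Sanic-CORS (with the Sanic Plugins Framework) are installed; the route and option values are illustrative only.
from sanic import Sanic, response

app = Sanic(__name__)

@app.route('/api/ping', methods=['GET', 'OPTIONS'])
@cross_origin(app, origins='*', methods=['GET', 'OPTIONS'])
async def ping(request):
    return response.json({'pong': True})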
def match(self, *args):
"""Whether or not to enter a given case statement"""
self.fall = self.fall or not args
self.fall = self.fall or (self.value in args)
return self.fall | Whether or not to enter a given case statement |
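The surrounding class is not shown in this entry; below is a minimal sketch of the classic switch/case recipe that a match method like this typically belongs to (the real class may differ).
class switch(object):
    """Sketch of the classic switch/case recipe; assumed, not taken from the entry above."""
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        # Yield the match method exactly once, then stop.
        yield self.match

    def match(self, *args):
        self.fall = self.fall or not args
        self.fall = self.fall or (self.value in args)
        return self.fall

for case in switch('b'):
    if case('a'):
        print('was a')
        break
    if case('b'):
        print('was b')  # this branch runs
        break
    if case():  # default
        print('no match')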
def add_to_context(self, name, **attrs):
"""Add attributes to a context.
"""
context = self.get_context(name=name)
attrs_ = context['context']
attrs_.update(**attrs) | Add attributes to a context. |
def find_by(cls, parent=None, **attributes):
"""
Gets the first resource of the given type and parent (if provided) with matching attributes.
This will trigger an api GET request.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:param **attributes: any number of keyword arguments as attributes to search the resource by
:returns: the matching resource, None if not found
:raises ResourceError: if no valid attributes are provided
"""
all_nones = not all(attributes.values())
if not attributes or all_nones:
raise cls.ResourceError('at least one attribute must be provided')
matches = cls.filter(parent, **attributes)
if matches:
return matches[0] | Gets the first resource of the given type and parent (if provided) with matching attributes.
This will trigger an api GET request.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:param **attributes: any number of keyword arguments as attributes to search the resource by
:returns: the matching resource, None if not found
:raises ResourceError: if no valid attributes are provided |
def get_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]):
"""
Get local historical K-line data (the K-line data must be downloaded first; see the help documentation)
:param code: stock code
:param start: start time, e.g. '2017-06-20'
:param end: end time, e.g. '2017-06-30'
The combinations of start and end are as follows:
========== ========== ========================================
start type end type   description
========== ========== ========================================
str        str        start and end are the specified dates
None       str        start is 365 days before end
str        None       end is 365 days after start
None       None       end is the current date, start is 365 days before end
========== ========== ========================================
:param ktype: K-line type, see the KLType definition
:param autype: price adjustment type, see the AuType definition
:param fields: list of fields to return, see the KL_FIELD definition: KL_FIELD.ALL KL_FIELD.OPEN ....
:return: (ret, data)
ret == RET_OK: data is a pandas DataFrame with the columns below
ret != RET_OK: data is an error string
================= =========== ==============================================================================
parameter         type        description
================= =========== ==============================================================================
code              str         stock code
time_key          str         K-line time
open              float       open price
close             float       close price
high              float       high price
low               float       low price
pe_ratio          float       price-earnings ratio (a ratio field; '%' is not displayed by default)
turnover_rate     float       turnover rate
volume            int         trading volume
turnover          float       turnover amount
change_rate       float       change rate
last_close        float       previous close price
================= =========== ==============================================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22'))
quote_ctx.close()
"""
return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end,
ktype=ktype, autype=autype, fields=fields) | Get local historical K-line data (the K-line data must be downloaded first; see the help documentation)
:param code: stock code
:param start: start time, e.g. '2017-06-20'
:param end: end time, e.g. '2017-06-30'
The combinations of start and end are as follows:
========== ========== ========================================
start type end type   description
========== ========== ========================================
str        str        start and end are the specified dates
None       str        start is 365 days before end
str        None       end is 365 days after start
None       None       end is the current date, start is 365 days before end
========== ========== ========================================
:param ktype: K-line type, see the KLType definition
:param autype: price adjustment type, see the AuType definition
:param fields: list of fields to return, see the KL_FIELD definition: KL_FIELD.ALL KL_FIELD.OPEN ....
:return: (ret, data)
ret == RET_OK: data is a pandas DataFrame with the columns below
ret != RET_OK: data is an error string
================= =========== ==============================================================================
parameter         type        description
================= =========== ==============================================================================
code              str         stock code
time_key          str         K-line time
open              float       open price
close             float       close price
high              float       high price
low               float       low price
pe_ratio          float       price-earnings ratio (a ratio field; '%' is not displayed by default)
turnover_rate     float       turnover rate
volume            int         trading volume
turnover          float       turnover amount
change_rate       float       change rate
last_close        float       previous close price
================= =========== ==============================================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22'))
quote_ctx.close() |
def bgblack(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('bgblack', string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color |
def reporter(self):
"""
Create the MASH report
"""
logging.info('Creating {} report'.format(self.analysistype))
make_path(self.reportpath)
header = 'Strain,ReferenceGenus,ReferenceFile,ReferenceGenomeMashDistance,Pvalue,NumMatchingHashes\n'
data = ''
for sample in self.metadata:
try:
data += '{},{},{},{},{},{}\n'.format(sample.name,
sample[self.analysistype].closestrefseqgenus,
sample[self.analysistype].closestrefseq,
sample[self.analysistype].mashdistance,
sample[self.analysistype].pvalue,
sample[self.analysistype].nummatches)
except AttributeError:
data += '{}\n'.format(sample.name)
# Create the report file
reportfile = os.path.join(self.reportpath, 'mash.csv')
with open(reportfile, 'w') as report:
report.write(header)
report.write(data) | Create the MASH report |
def stats(self, index=None, metric=None, params=None):
"""
Retrieve statistics on different operations happening on an index.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg metric: Limit the information returned to the specific metrics.
:arg completion_fields: A comma-separated list of fields for `fielddata`
and `suggest` index metric (supports wildcards)
:arg fielddata_fields: A comma-separated list of fields for `fielddata`
index metric (supports wildcards)
:arg fields: A comma-separated list of fields for `fielddata` and
`completion` index metric (supports wildcards)
:arg groups: A comma-separated list of search groups for `search` index
metric
:arg include_segment_file_sizes: Whether to report the aggregated disk
usage of each one of the Lucene index files (only applies if segment
stats are requested), default False
:arg level: Return stats aggregated at cluster, index or shard level,
default 'indices', valid choices are: 'cluster', 'indices', 'shards'
:arg types: A comma-separated list of document types for the `indexing`
index metric
"""
return self.transport.perform_request(
"GET", _make_path(index, "_stats", metric), params=params
) | Retrieve statistics on different operations happening on an index.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg metric: Limit the information returned to the specific metrics.
:arg completion_fields: A comma-separated list of fields for `fielddata`
and `suggest` index metric (supports wildcards)
:arg fielddata_fields: A comma-separated list of fields for `fielddata`
index metric (supports wildcards)
:arg fields: A comma-separated list of fields for `fielddata` and
`completion` index metric (supports wildcards)
:arg groups: A comma-separated list of search groups for `search` index
metric
:arg include_segment_file_sizes: Whether to report the aggregated disk
usage of each one of the Lucene index files (only applies if segment
stats are requested), default False
:arg level: Return stats aggregated at cluster, index or shard level,
default 'indices', valid choices are: 'cluster', 'indices', 'shards'
:arg types: A comma-separated list of document types for the `indexing`
index metric |
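A hedged usage sketch, assuming an elasticsearch-py client (this method lives on the indices namespace client); the index name and metrics are illustrative only.
from elasticsearch import Elasticsearch

es = Elasticsearch()  # defaults to localhost:9200
stats = es.indices.stats(index='my-index', metric='docs,store')
print(stats['_all']['primaries']['docs']['count'])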
def _create_archive(self):
'''This will create a tar.gz compressed archive of the scrubbed directory'''
try:
self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
t = tarfile.open(self.archive_path, 'w:gz')
for dirpath, dirnames, filenames in os.walk(self.dir_path):
for f in filenames:
f_full = os.path.join(dirpath, f)
f_archive = f_full.replace(self.report_dir,'')
self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
t.add(f_full, arcname=f_archive)
except Exception as e: #pragma: no cover
self.logger.exception(e)
raise Exception('CreateArchiveError: Unable to create Archive')
self._clean_up()
self.logger.info('Archiving Complete')
self.logger.con_out('SOSCleaner Complete')
if not self.quiet: # pragma: no cover
t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,''))
t.close() | This will create a tar.gz compressed archive of the scrubbed directory |
def _parse_line(sep, line):
"""
Parse a grub command/config line with format: cmd{sep}opts
Returns: (name, value): value can be None
"""
strs = line.split(sep, 1)
return (strs[0].strip(), None) if len(strs) == 1 else (strs[0].strip(), strs[1].strip()) | Parse a grub command/config line with format: cmd{sep}opts
Returns: (name, value): value can be None |
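A short usage sketch for the helper above:
print(_parse_line('=', 'GRUB_TIMEOUT=5'))                # ('GRUB_TIMEOUT', '5')
print(_parse_line('=', 'quiet'))                         # ('quiet', None)
print(_parse_line(' ', 'set default="${saved_entry}"'))  # ('set', 'default="${saved_entry}"')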
def load_shapefile(self, feature_type, base_path):
"""Load downloaded shape file to QGIS Main Window.
TODO: This is cut & paste from OSM - refactor to have one method
:param feature_type: What kind of features should be downloaded.
Currently 'buildings', 'building-points' or 'roads' are supported.
:type feature_type: str
:param base_path: The base path of the shape file (without extension).
:type base_path: str
:raises: FileMissingError - when buildings.shp does not exist
"""
path = '%s.shp' % base_path
if not os.path.exists(path):
message = self.tr(
'%s does not exist. The server does not have any data for '
'this extent.' % path)
raise FileMissingError(message)
self.iface.addVectorLayer(path, feature_type, 'ogr') | Load downloaded shape file to QGIS Main Window.
TODO: This is cut & paste from OSM - refactor to have one method
:param feature_type: What kind of features should be downloaded.
Currently 'buildings', 'building-points' or 'roads' are supported.
:type feature_type: str
:param base_path: The base path of the shape file (without extension).
:type base_path: str
:raises: FileMissingError - when buildings.shp does not exist |
def outline(self, face_ids=None, **kwargs):
"""
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
"""
from .path.exchange.misc import faces_to_path
from .path.exchange.load import _create_path
path = _create_path(**faces_to_path(self,
face_ids,
**kwargs))
return path | Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline |
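A hedged usage sketch, assuming the trimesh package: a single triangle is not watertight, so its outline is simply its boundary.
import trimesh

mesh = trimesh.Trimesh(vertices=[[0, 0, 0], [1, 0, 0], [0, 1, 0]],
                       faces=[[0, 1, 2]])
path = mesh.outline()
print(path.length)  # perimeter of the triangle, roughly 2 + sqrt(2)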
def optimize(self, x0, f=None, df=None, f_df=None):
"""
:param x0: initial point for a local optimizer.
:param f: function to optimize.
:param df: gradient of the function to optimize.
:param f_df: returns both the function to optimize and its gradient.
"""
if len(self.bounds) == 1:
raise IndexError("CMA does not work in problems of dimension 1.")
try:
import cma
def CMA_f_wrapper(f):
def g(x):
return f(np.array([x]))
return g
lB = np.asarray(self.bounds)[:,0]
uB = np.asarray(self.bounds)[:,1]
x = cma.fmin(CMA_f_wrapper(f), x0, 0.6, options={"bounds":[lB, uB], "verbose":-1})[0]
return np.atleast_2d(x), f(np.atleast_2d(x))
except ImportError:
print("Cannot find cma library, please install it to use this option.")
raise | :param x0: initial point for a local optimizer.
:param f: function to optimize.
:param df: gradient of the function to optimize.
:param f_df: returns both the function to optimize and its gradient. |
def kruskal(dv=None, between=None, data=None, detailed=False,
export_filename=None):
"""Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependent variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172
"""
from scipy.stats import chi2, rankdata, tiecorrect
# Check data
_check_dataframe(dv=dv, between=between, data=data,
effects='between')
# Remove NaN values
data = data.dropna()
# Reset index (avoid duplicate axis error)
data = data.reset_index(drop=True)
# Extract number of groups and total sample size
groups = list(data[between].unique())
n_groups = len(groups)
n = data[dv].size
# Rank data, dealing with ties appropriately
data['rank'] = rankdata(data[dv])
# Find the total of rank per groups
grp = data.groupby(between)['rank']
sum_rk_grp = grp.sum().values
n_per_grp = grp.count().values
# Calculate chi-square statistic (H)
H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1)
# Correct for ties
H /= tiecorrect(data['rank'].values)
# Calculate DOF and p-value
ddof1 = n_groups - 1
p_unc = chi2.sf(H, ddof1)
# Create output dataframe
stats = pd.DataFrame({'Source': between,
'ddof1': ddof1,
'H': np.round(H, 3),
'p-unc': p_unc,
}, index=['Kruskal'])
col_order = ['Source', 'ddof1', 'H', 'p-unc']
stats = stats.reindex(columns=col_order)
stats.dropna(how='all', axis=1, inplace=True)
# Export to .csv
if export_filename is not None:
_export_table(stats, export_filename)
return stats | Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependent variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172 |
def mod_repo(repo, **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as uri is defined.
The following options are available to modify a repo definition:
repo
alias by which opkg refers to the repo.
uri
the URI to the repo.
compressed
defines (True or False) if the index file is compressed
enabled
enable or disable (True or False) repository
but do not remove if disabled.
refresh
enable or disable (True or False) auto-refresh of the repositories
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo repo uri=http://new/uri
salt '*' pkg.mod_repo repo enabled=False
'''
repos = list_repos()
found = False
uri = ''
if 'uri' in kwargs:
uri = kwargs['uri']
for repository in repos:
source = repos[repository][0]
if source['name'] == repo:
found = True
repostr = ''
if 'enabled' in kwargs and not kwargs['enabled']:
repostr += '# '
if 'compressed' in kwargs:
repostr += 'src/gz ' if kwargs['compressed'] else 'src'
else:
repostr += 'src/gz' if source['compressed'] else 'src'
repo_alias = kwargs['alias'] if 'alias' in kwargs else repo
if ' ' in repo_alias:
repostr += ' "{0}"'.format(repo_alias)
else:
repostr += ' {0}'.format(repo_alias)
repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri'])
trusted = kwargs.get('trusted')
repostr = _set_trusted_option_if_needed(repostr, trusted) if trusted is not None else \
_set_trusted_option_if_needed(repostr, source.get('trusted'))
_mod_repo_in_file(repo, repostr, source['file'])
elif uri and source['uri'] == uri:
raise CommandExecutionError(
'Repository \'{0}\' already exists as \'{1}\'.'.format(uri, source['name']))
if not found:
# Need to add a new repo
if 'uri' not in kwargs:
raise CommandExecutionError(
'Repository \'{0}\' not found and no URI passed to create one.'.format(repo))
properties = {'uri': kwargs['uri']}
# If compressed is not defined, assume True
properties['compressed'] = kwargs['compressed'] if 'compressed' in kwargs else True
# If enabled is not defined, assume True
properties['enabled'] = kwargs['enabled'] if 'enabled' in kwargs else True
properties['trusted'] = kwargs.get('trusted')
_add_new_repo(repo, properties)
if 'refresh' in kwargs:
refresh_db() | Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as uri is defined.
The following options are available to modify a repo definition:
repo
alias by which opkg refers to the repo.
uri
the URI to the repo.
compressed
defines (True or False) if the index file is compressed
enabled
enable or disable (True or False) repository
but do not remove if disabled.
refresh
enable or disable (True or False) auto-refresh of the repositories
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo repo uri=http://new/uri
salt '*' pkg.mod_repo repo enabled=False |
def wiki_list(self, title=None, creator_id=None, body_matches=None,
other_names_match=None, creator_name=None, hide_deleted=None,
other_names_present=None, order=None):
"""Function to retrieves a list of every wiki page.
Parameters:
title (str): Page title.
creator_id (int): Creator id.
body_matches (str): Page content.
other_names_match (str): Other names.
creator_name (str): Creator name.
hide_deleted (str): Can be: yes, no.
other_names_present (str): Can be: yes, no.
order (str): Can be: date, title.
"""
params = {
'search[title]': title,
'search[creator_id]': creator_id,
'search[body_matches]': body_matches,
'search[other_names_match]': other_names_match,
'search[creator_name]': creator_name,
'search[hide_deleted]': hide_deleted,
'search[other_names_present]': other_names_present,
'search[order]': order
}
return self._get('wiki_pages.json', params) | Function to retrieve a list of every wiki page.
Parameters:
title (str): Page title.
creator_id (int): Creator id.
body_matches (str): Page content.
other_names_match (str): Other names.
creator_name (str): Creator name.
hide_deleted (str): Can be: yes, no.
other_names_present (str): Can be: yes, no.
order (str): Can be: date, title. |
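A hedged usage sketch: this method appears to belong to a Danbooru-style API client; the constructor below is an assumption, not something stated in the entry.
client = Danbooru('danbooru')  # hypothetical client construction
pages = client.wiki_list(title='help:home', hide_deleted='yes', order='title')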
def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:
"""
Utility method to create a default options structure with the lazy parsing inside
:param lazy_mfcollection_parsing:
:return: the options structure filled with the lazy parsing option (for the MultifileCollectionParser)
"""
return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}} | Utility method to create a default options structure with the lazy parsing inside
:param lazy_mfcollection_parsing:
:return: the options structure filled with the lazy parsing option (for the MultifileCollectionParser) |
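A small usage sketch; the returned structure is meant to be passed as the options argument of the parsing entry points (a parsyfiles-style API is assumed):
opts = create_parser_options(lazy_mfcollection_parsing=True)
print(opts)  # {'MultifileCollectionParser': {'lazy_parsing': True}}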
def getSpanDurations(self, time_stamp, service_name, rpc_name):
"""
Given a time stamp, server service name, and rpc name, fetch all of the client services calling in, paired
with the lists of every span duration (list<i64>) from the server to client. The lists of span durations
include information on call counts and mean/stdDev/etc of call durations.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<span_durations>.
Parameters:
- time_stamp
- service_name
- rpc_name
"""
self.send_getSpanDurations(time_stamp, service_name, rpc_name)
return self.recv_getSpanDurations() | Given a time stamp, server service name, and rpc name, fetch all of the client services calling in, paired
with the lists of every span duration (list<i64>) from the server to client. The lists of span durations
include information on call counts and mean/stdDev/etc of call durations.
The three arguments specify epoch time in microseconds, server side service name and rpc name. The return maps
contains the key - client_service_name and value - list<span_durations>.
Parameters:
- time_stamp
- service_name
- rpc_name |
def paramtypes(self):
""" get all parameter types """
for m in [p[1] for p in self.ports]:
for p in [p[1] for p in m]:
for pd in p:
if pd[1] in self.params:
continue
item = (pd[1], pd[1].resolve())
self.params.append(item) | get all parameter types |
def unquote(str):
"""Remove quotes from a string."""
if len(str) > 1:
if str.startswith('"') and str.endswith('"'):
return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
if str.startswith('<') and str.endswith('>'):
return str[1:-1]
return str | Remove quotes from a string. |
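A short usage sketch for the helper above:
print(unquote('"a \\"b\\" c"'))       # a "b" c
print(unquote('<user@example.com>'))  # user@example.com
print(unquote('plain'))               # plain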