Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
---|---|---|
382,400 | def dispatch(self, test=False):
if not self.new_to_dispatch:
raise DispatcherError("Dispatcher cannot dispatch, "
"because no configuration is prepared!")
if self.first_dispatch_done:
raise DispatcherError("Dispatcher cannot dispatch, "
"because the configuration is still dispatched!")
if self.dispatch_ok:
logger.info("Dispatching is already done and ok...")
return
logger.info("Trying to send configuration to the satellites...")
self.dispatch_ok = True
for link in self.arbiters:
if link == self.arbiter_link:
continue
if not link.active:
continue
if not link.spare:
continue
if link.configuration_sent:
logger.debug("Arbiter %s already sent!", link.name)
continue
if not link.reachable:
logger.debug("Arbiter %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the arbiter %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
link.do_not_run()
for link in self.schedulers:
if link.configuration_sent:
logger.debug("Scheduler %s already sent!", link.name)
continue
if not link.active:
continue
if not link.reachable:
logger.debug("Scheduler %s is not reachable to receive its configuration",
link.name)
continue
logger.info("Sending configuration to the scheduler %s", link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
for link in self.satellites:
if link.configuration_sent:
logger.debug("%s %s already sent!", link.type, link.name)
continue
if not link.active:
continue
if not link.reachable:
logger.warning("%s %s is not reachable to receive its configuration",
link.type, link.name)
continue
logger.info("Sending configuration to the %s %s", link.type, link.name)
logger.debug("- %s", link.cfg)
link.put_conf(link.cfg, test=test)
link.configuration_sent = True
logger.info("- sent")
if self.dispatch_ok:
self.new_to_dispatch = False
self.first_dispatch_done = True | Send configuration to satellites
:return: None |
382,401 | def exhaust_stream(f):
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, "exhaust", None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f) | Helper decorator for methods that exhausts the stream on return. |
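A minimal usage sketch of the decorator above (the Reader class and the 16-byte read are hypothetical, not from the source). The point is that the remaining stream bytes are drained even when the wrapped method returns early:
import io
from functools import update_wrapper  # required by exhaust_stream above

class Reader:
    @exhaust_stream
    def parse(self, stream):
        return stream.read(16)  # consume only a prefix; the decorator drains the rest

Reader().parse(io.BytesIO(b"x" * 1024))  # the stream is fully read on return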
382,402 | def execute_command_with_path_in_process(command, path, shell=False, cwd=None, logger=None):
if logger is None:
logger = _logger
logger.debug("Opening path with command: {0} {1}".format(command, path))
    args = shlex.split('{0} "{1}"'.format(command, path))
try:
subprocess.Popen(args, shell=shell, cwd=cwd)
return True
except OSError as e:
logger.error(.format(e))
return False | Executes a specific command in a separate process with a path as argument.
:param command: the command to be executed
:param path: the path as first argument to the shell command
:param bool shell: Whether to use a shell
:param str cwd: The working directory of the command
:param logger: optional logger instance which can be passed in from another module
:return: None |
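A hypothetical call to the function above (the opener command and path are illustrative only; the function itself needs ``import shlex`` and ``import subprocess`` in scope):
# Open a file with the platform's opener; returns True if the process
# started, False on OSError.
ok = execute_command_with_path_in_process('xdg-open', '/tmp/report.pdf')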
382,403 | def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith( + name):
raise CommandError(
+ name)
value_str = response[4][0][(len(name)+1):]
if tp == bool:
return (value_str == )
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned by the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter. |
382,404 | def send_facebook(self, token):
self.send_struct( % len(token), 81, *map(ord, token))
self.facebook_token = token | Tells the server which Facebook account this client uses.
After sending, the server takes some time to
get the data from Facebook.
Seems to be broken in recent versions of the game. |
382,405 | def cmd(send, _, args):
adminlist = []
for admin in args[].query(Permissions).order_by(Permissions.nick).all():
if admin.registered:
adminlist.append("%s (V)" % admin.nick)
else:
adminlist.append("%s (U)" % admin.nick)
send(", ".join(adminlist), target=args[]) | Returns a list of admins.
V = Verified (authed to NickServ), U = Unverified.
Syntax: {command} |
382,406 | def encode(char_data, encoding=):
if type(char_data) is unicode:
return char_data.encode(encoding, )
else:
return char_data | Encode the parameter as a byte string.
:param char_data:
:rtype: bytes |
382,407 | def normalizeGlyphRightMargin(value):
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph right margin must be an :ref:`type-int-float`, "
"not %s." % type(value).__name__)
return value | Normalizes glyph right margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value. |
382,408 | def _approximate_eigenvalues(A, tol, maxiter, symmetric=None,
initial_guess=None):
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
t = A.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {: 0, : 1, : 2, : 0, : 1, : 2}
breakdown = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
breakdown_flag = False
if A.shape[0] != A.shape[1]:
raise ValueError()
maxiter = min(A.shape[0], maxiter)
if initial_guess is None:
v0 = sp.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
else:
v0 = initial_guess
v0 /= norm(v0)
H = np.zeros((maxiter+1, maxiter),
dtype=np.find_common_type([v0.dtype, A.dtype], []))
V = [v0]
beta = 0.0
for j in range(maxiter):
w = A * V[-1]
if symmetric:
if j >= 1:
H[j-1, j] = beta
w -= beta * V[-2]
alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
H[j, j] = alpha
w -= alpha * V[-1]
beta = norm(w)
H[j+1, j] = beta
if (H[j+1, j] < breakdown):
breakdown_flag = True
break
w /= beta
V.append(w)
V = V[-2:]
else:
for i, v in enumerate(V):
H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
w = w - H[i, j]*v
H[j+1, j] = norm(w)
if (H[j+1, j] < breakdown):
breakdown_flag = True
if H[j+1, j] != 0.0:
w = w/H[j+1, j]
V.append(w)
break
w = w/H[j+1, j]
V.append(w)
from scipy.linalg import eig
Eigs, Vects = eig(H[:j+1, :j+1], left=False, right=True)
    return (Vects, Eigs, H, V, breakdown_flag) | Approximate eigenvalues.
Used by approximate_spectral_radius and condest.
Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
and eigenvalues of the Hessenberg matrix H, respectively, and V is the
Krylov space. breakdown_flag denotes whether Lanczos/Arnoldi suffered
breakdown. E is therefore the approximate eigenvalues of A.
To obtain approximate eigenvectors of A, compute V*W. |
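The driver above (its string literals were stripped during extraction) implements the Arnoldi/Lanczos iteration. As a reference for the technique it names, here is a compact, self-contained Arnoldi iteration in plain NumPy; this is a sketch of the method, not the library's code:
import numpy as np

def arnoldi(A, maxiter=10, seed=0):
    # Build an orthonormal Krylov basis V and an upper-Hessenberg H with
    # A @ V[:, :k] == V[:, :k+1] @ H[:k+1, :k].
    rng = np.random.default_rng(seed)
    n = A.shape[0]
    v = rng.standard_normal(n)
    v /= np.linalg.norm(v)
    V = [v]
    H = np.zeros((maxiter + 1, maxiter))
    for j in range(maxiter):
        w = A @ V[-1]
        for i, vi in enumerate(V):           # modified Gram-Schmidt against the basis
            H[i, j] = np.dot(vi.conj(), w)
            w = w - H[i, j] * vi
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:              # breakdown: Krylov space is invariant
            return np.array(V).T, H[: j + 1, : j + 1]
        V.append(w / H[j + 1, j])
    return np.array(V).T, H[:maxiter, :maxiter]

# The eigenvalues of H approximate the extremal eigenvalues of A:
A = np.diag([1.0, 2.0, 3.0, 10.0])
_, H = arnoldi(A, maxiter=4)
print(np.sort(np.linalg.eigvals(H)))   # close to [1, 2, 3, 10]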
382,409 | def count_matrix(self):
if self.hidden_state_trajectories is None:
raise RuntimeError()
C = msmest.count_matrix(self.hidden_state_trajectories, 1, nstates=self._nstates)
return C.toarray() | Compute the transition count matrix from hidden state trajectory.
Returns
-------
C : numpy.array with shape (nstates,nstates)
C[i,j] is the number of transitions observed from state i to state j
Raises
------
RuntimeError
A RuntimeError is raised if the HMM model does not yet have a hidden state trajectory associated with it.
Examples
-------- |
382,410 | def main_base_ramp(self) -> "Ramp":
if hasattr(self, "cached_main_base_ramp"):
return self.cached_main_base_ramp
self.cached_main_base_ramp = min(
{ramp for ramp in self.game_info.map_ramps if len(ramp.upper2_for_ramp_wall) == 2},
key=(lambda r: self.start_location.distance_to(r.top_center)),
)
return self.cached_main_base_ramp | Returns the Ramp instance of the closest main-ramp to start location. Look in game_info.py for more information |
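The method above memoizes by stashing the computed ramp on the instance and checking with hasattr. On Python 3.8+ the same idiom can be written with functools.cached_property; a sketch, where _compute_main_base_ramp is a hypothetical stand-in for the min(...) expression above:
from functools import cached_property

class BotAIExample:
    @cached_property
    def main_base_ramp(self):
        # evaluated once on first access, then served from the instance __dict__
        return self._compute_main_base_ramp()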
382,411 | def _validate_logical(self, rule, field, value):
if not isinstance(value, Sequence):
self._error(field, errors.BAD_TYPE)
return
validator = self._get_child_validator(
document_crumb=rule, allow_unknown=False,
schema=self.target_validator.validation_rules)
for constraints in value:
_hash = (mapping_hash({: constraints}),
mapping_hash(self.target_validator.types_mapping))
if _hash in self.target_validator._valid_schemas:
continue
validator(constraints, normalize=False)
if validator._errors:
self._error(validator._errors)
else:
self.target_validator._valid_schemas.add(_hash) | {'allowed': ('allof', 'anyof', 'noneof', 'oneof')} |
382,412 | def set(self, key, val):
return self.evolver().set(key, val).persistent() | Return a new PMap with key and val inserted.
>>> m1 = m(a=1, b=2)
>>> m2 = m1.set('a', 3)
>>> m3 = m1.set('c' ,4)
>>> m1
pmap({'a': 1, 'b': 2})
>>> m2
pmap({'a': 3, 'b': 2})
>>> m3
pmap({'a': 1, 'c': 4, 'b': 2}) |
382,413 | def _build_flavors(p, flist, qualdecl=None):
flavors = {}
if ( in flist and in flist) \
or \
( in flist and in flist):
raise MOFParseError(parser_token=p, msg="Conflicting flavors are"
"invalid")
if qualdecl is not None:
flavors = {: qualdecl.overridable,
: qualdecl.translatable,
: qualdecl.tosubclass,
: qualdecl.toinstance}
if in flist:
flavors[] = False
if in flist:
flavors[] = True
if in flist:
flavors[] = True
if in flist:
flavors[] = False
if in flist:
flavors[] = True
if in flist:
flavors[] = True
return flavors | Build and return a dictionary defining the flavors from the
flist argument.
This function maps from the input keyword definitions for the flavors
(ex. EnableOverride) to the PyWBEM internal definitions
(ex. overridable)
Uses the qualdecl argument as a basis if it exists. This is to define
qualifier flavors if qualifier declarations exist.
This applies the values from the qualifierDecl to the qualifier
flavor list.
This function and the defaultflavor function ensure that all
flavors are defined in the created dictionary that is returned. This
is important because the PyWBEM classes allow `None` as a flavor
definition. |
382,414 | def validate(self):
if not self.api_token or not self.api_token_secret:
raise ImproperlyConfigured(" and are required for authentication.")
if self.response_type not in ["json", "pson", "xml", "debug", None]:
raise ImproperlyConfigured(" is an invalid response_type" % self.response_type) | Perform validation check on properties. |
382,415 | def describe_unsupported(series, **kwargs):
leng = len(series)
count = series.count()
n_infinite = count - series.count()
results_data = {: count,
: 1 - count * 1.0 / leng,
: leng - count,
: n_infinite * 1.0 / leng,
: n_infinite,
: base.S_TYPE_UNSUPPORTED}
try:
results_data[] = series.memory_usage()
except:
results_data[] = 0
    return pd.Series(results_data, name=series.name) | Compute summary statistics of an unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. |
382,416 | def publish(self, message, tag=b):
self.send(tag + b + message) | Publish `message` with specified `tag`.
:param message: message data
:type message: str
:param tag: message tag
:type tag: str |
382,417 | def update(self):
stats = self.get_init_value()
if self.input_method == :
stats = []
try:
temperature = self.__set_type(self.glancesgrabsensors.get(),
)
except Exception as e:
logger.error("Cannot grab sensors temperatures (%s)" % e)
else:
stats.extend(temperature)
try:
fan_speed = self.__set_type(self.glancesgrabsensors.get(),
)
except Exception as e:
logger.error("Cannot grab FAN speed (%s)" % e)
else:
stats.extend(fan_speed)
try:
hddtemp = self.__set_type(self.hddtemp_plugin.update(),
)
except Exception as e:
logger.error("Cannot grab HDD temperature (%s)" % e)
else:
stats.extend(hddtemp)
try:
batpercent = self.__set_type(self.batpercent_plugin.update(),
)
except Exception as e:
logger.error("Cannot grab battery percent (%s)" % e)
else:
stats.extend(batpercent)
elif self.input_method == :
pass
for stat in stats:
alias = self.has_alias(stat["label"].lower())
if alias:
stat["label"] = alias
self.stats = stats
return self.stats | Update sensors stats using the input method. |
382,418 | def transformer_en_de_512(dataset_name=None, src_vocab=None, tgt_vocab=None, pretrained=False,
ctx=cpu(), root=os.path.join(get_home_dir(), ), **kwargs):
r
predefined_args = {: 512,
: 2048,
: 0.1,
: 0.1,
: 6,
: 8,
: True,
: True,
: 512,
: True,
: None}
mutable_args = frozenset([, , , , ,
, ])
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
predefined_args.update(kwargs)
encoder, decoder = get_transformer_encoder_decoder(units=predefined_args[],
hidden_size=predefined_args[],
dropout=predefined_args[],
num_layers=predefined_args[],
num_heads=predefined_args[],
max_src_length=530,
max_tgt_length=549,
scaled=predefined_args[])
return _get_transformer_model(NMTModel, , dataset_name,
src_vocab, tgt_vocab, encoder, decoder,
predefined_args[], predefined_args[],
predefined_args[],
predefined_args[], pretrained, ctx, root) | r"""Transformer pretrained model.
Embedding size is 512, and hidden layer size is 2048.
Parameters
----------
dataset_name : str or None, default None
src_vocab : gluonnlp.Vocab or None, default None
tgt_vocab : gluonnlp.Vocab or None, default None
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
Returns
-------
gluon.Block, gluonnlp.Vocab, gluonnlp.Vocab |
382,419 | def format(logger,
show_successful=True,
show_errors=True,
show_traceback=True):
output = []
errors = logger.get_aborted_actions()
if show_errors and errors:
output += _underline()
for log in logger.get_aborted_logs():
if show_traceback:
output.append(log.get_name() + )
output.append(log.get_error())
else:
output.append(log.get_name() + + log.get_error(False))
output.append()
if show_successful:
output += _underline()
for log in logger.get_succeeded_logs():
output.append(log.get_name())
output.append()
    return .join(output).strip() | Formats a report of the actions that were logged by the given Logger.
The report contains a list of successful actions, as well as the full
error message on failed actions.
:type logger: Logger
:param logger: The logger that recorded what happened in the queue.
:rtype: string
:return: A string summarizing the status of every performed task. |
382,420 | def get_root_path(self, language):
path = None
if self.main and self.main.projects:
path = self.main.projects.get_active_project_path()
if language == :
path = get_conf_path()
if not osp.exists(path):
os.mkdir(path)
else:
path = getcwd_or_home()
return path | Get root path to pass to the LSP servers.
This can be the current project path or the output of
getcwd_or_home (except for Python, see below). |
382,421 | def _call_process(self, method, *args, **kwargs):
_kwargs = dict()
for kwarg in execute_kwargs:
try:
_kwargs[kwarg] = kwargs.pop(kwarg)
except KeyError:
pass
opt_args = self.transform_kwargs(**kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
args = opt_args + ext_args
call = ["git", dashify(method)]
call.extend(args)
return self.execute(call, **_kwargs) | Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently as None
is realized as non-existent
:param kwargs:
is a dict of keyword arguments.
This function accepts the same optional keyword arguments
as execute().
``Examples``::
git.rev_list('master', max_count=10, header=True)
:return: Same as ``execute`` |
382,422 | def get_cpu_info(self) -> str:
output, _ = self._execute(
, self.device_sn, , , )
return output | Show device CPU information. |
382,423 | def get_coeffs(expr, expand=False, epsilon=0.):
if expand:
expr = expr.expand()
ret = defaultdict(int)
operands = expr.operands if isinstance(expr, OperatorPlus) else [expr]
for e in operands:
c, t = _coeff_term(e)
try:
if abs(complex(c)) < epsilon:
continue
except TypeError:
pass
ret[t] += c
return ret | Create a dictionary with all Operator terms of the expression
(understood as a sum) as keys and their coefficients as values.
The returned object is a defaultdict that returns 0. if a term/key
doesn't exist.
Args:
expr: The operator expression to get all coefficients from.
expand: Whether to expand the expression distributively.
epsilon: If non-zero, drop all Operators with coefficients that have
absolute value less than epsilon.
Returns:
dict: A dictionary ``{op1: coeff1, op2: coeff2, ...}`` |
382,424 | def _send_packet(
self, ip, port, packet,
update_timestamp=True, acknowledge_packet=True
):
if acknowledge_packet:
packet.header.sequence_number = self._send_seq_num
self._send_seq_num += 1
packet.header.device_id = self._device_id
try:
packed = packet.pack(update_timestamp=update_timestamp)
except ValueError:
self.exception("Failed to pack packet")
return
self._send(ip, port, packed)
if acknowledge_packet:
with self._seq_ack_lock:
self._seq_ack.add(packet.header.sequence_number)
self._to_ack.put(
(time.time() + self._retransmit_timeout, 1, (ip, port), packet)
)
self.debug(u"Send: {}".format(packet)) | Send a packet
:param ip: Ip to send to
:type ip: str
:param port: Port to send to
:type port: int
:param packet: Packet to be transmitted
:type packet: APPMessage
:param update_timestamp: Should update timestamp to current
:type update_timestamp: bool
:param acknowledge_packet: Should packet get acknowledged
:type acknowledge_packet: bool
:rtype: None |
382,425 | def get_model(cls, name=None, status=ENABLED):
ppath = cls.get_pythonpath()
if is_plugin_point(cls):
if name is not None:
kwargs = {}
if status is not None:
kwargs[] = status
return Plugin.objects.get(point__pythonpath=ppath,
name=name, **kwargs)
else:
return PluginPointModel.objects.get(pythonpath=ppath)
else:
        return Plugin.objects.get(pythonpath=ppath) | Returns the model instance of a plugin point or plugin, depending on
which class this method is called.
Example::
plugin_model_instance = MyPlugin.get_model()
plugin_model_instance = MyPluginPoint.get_model('plugin-name')
plugin_point_model_instance = MyPluginPoint.get_model() |
382,426 | def coerce(cls, arg):
try:
return cls(arg).value
except (ValueError, TypeError):
raise InvalidParameterDatatype("%s coerce error" % (cls.__name__,)) | Given an arg, return the appropriate value given the class. |
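A worked sketch of the coerce pattern above, assuming cls is an Enum-like class whose constructor validates its argument (the Color class is hypothetical):
from enum import Enum

class Color(Enum):
    RED = 'red'
    BLUE = 'blue'

# cls(arg).value validates and normalizes, exactly as coerce() does:
print(Color('red').value)   # -> 'red'
try:
    Color('green')          # invalid -> ValueError, which coerce() re-raises
except ValueError as exc:   # (as InvalidParameterDatatype)
    print('coerce error:', exc)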
382,427 | def _get_simple_dtype_and_shape(self, colnum, rows=None):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info[][colnum]
name = info[]
if rows is None:
nrows = self._info[]
else:
nrows = rows.size
shape = None
tdim = info[]
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == ))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
shape = (nrows, shape)
else:
shape = tuple([nrows] + list(shape))
else:
shape = nrows
return npy_type, shape | When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1, dim2) |
382,428 | def predict_is(self, h=5, fit_once=True, fit_method=, intervals=False):
predictions = []
for t in range(0,h):
data1 = self.data_original.iloc[:-h+t,:]
data2 = self.data_original.iloc[-h+t:,:]
x = DynReg(formula=self.formula, data=data1)
if fit_once is False:
x.fit(printer=False, fit_method=fit_method)
if t == 0:
if fit_once is True:
x.fit(printer=False, fit_method=fit_method)
saved_lvs = x.latent_variables
predictions = x.predict(1, oos_data=data2, intervals=intervals)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(h=1, oos_data=data2, intervals=intervals)])
predictions.rename(columns={0:self.y_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions | Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
fit_method : string
Which method to fit the model with
intervals : boolean
Whether to output prediction intervals or not
Returns
----------
- pd.DataFrame with predicted values |
382,429 | def extend(self, *args):
args = list(args)
for i in args:
self.obj.update(i)
return self._wrap(self.obj) | Extend a given object with all the properties in
passed-in object(s). |
382,430 | def add(self, fact):
token = Token.valid(fact)
MATCHER.debug("<BusNode> added %r", token)
for child in self.children:
child.callback(token) | Create a VALID token and send it to all children. |
382,431 | def widgetEdited(self, event=None, val=None, action=, skipDups=True):
if not self._editedCallbackObj and not self._flagNonDefaultVals:
return
curVal = val
if curVal is None:
curVal = self.choice.get()
self.flagThisPar(curVal, False)
if skipDups and curVal==self._lastWidgetEditedVal: return
if not self._editedCallbackObj: return
self._editedCallbackObj.edited(self.paramInfo.scope,
self.paramInfo.name,
self.previousValue, curVal,
action)
self._lastWidgetEditedVal = curVal | A general method for firing any applicable triggers when
a value has been set. This is meant to be easily callable from any
part of this class (or its subclasses), so that it can be called
as soon as need be (immed. on click?). This is smart enough to
be called multiple times, itself handling the removal of any/all
duplicate successive calls (unless skipDups is False). If val is
None, it will use the GUI entry's current value via choice.get().
See teal.py for a description of action. |
382,432 | def get_default_config(self):
config = super(IPMISensorCollector, self).get_default_config()
config.update({
: ,
: False,
: ,
: ,
: False,
:
})
return config | Returns the default collector settings |
382,433 | def curated(name):
return cached_download( + name,
os.path.join(, name.replace(, os.path.sep))) | Download and return a path to a sample that is curated by the PyAV developers.
Data is handled by :func:`cached_download`. |
382,434 | def power(self, n):
if n > 0:
return super().power(n)
return Kraus(SuperOp(self).power(n)) | The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Kraus: the matrix power of the SuperOp converted to a Kraus channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. |
382,435 | def tar(filename, dirs=[], gzip=False):
if gzip:
cmd = % filename
else:
cmd = % filename
if type(dirs) != :
dirs = [dirs]
cmd += .join(str(x) for x in dirs)
retcode, output = sh(cmd)
return (retcode, output, filename) | Create a tar-file or a tar.gz at location: filename.
params:
gzip: if True - gzip the file, default = False
dirs: dirs to be tared
returns a 3-tuple with returncode (integer), terminal output (string)
and the new filename. |
382,436 | def filter_by_cols(self, cols, ID=None):
rows = to_list(cols)
fil = lambda x: x in rows
applyto = {k: self._positions[k][1] for k in self.keys()}
if ID is None:
ID = self.ID +
return self.filter(fil, applyto=applyto, ID=ID) | Keep only Measurements in corresponding columns. |
382,437 | def strip_empty_lines_forward(self, content, i):
while i < len(content):
line = content[i].strip()
if line != :
break
self.debug_print_strip_msg(i, content[i])
i += 1
return i | Skip over empty lines
:param content: parsed text
:param i: index of the current line
:return: index of the first non-empty line |
382,438 | def from_pdf(
cls, pdf, filename, width=288, height=432, dpi=203, font_path=None,
center_of_pixel=False, use_bindings=False
):
setpagedevice = [
,
]
cmd = [
,
,
,
,
,
,
,
,
% int(dpi),
% int(width),
% int(height),
,
,
,
% .join(setpagedevice)
]
if center_of_pixel:
cmd += []
if font_path and os.path.exists(font_path):
cmd += [ + font_path]
if use_bindings:
import ghostscript
grfs = []
for png in pngs.split(png_start)[1:]:
grfs.append(cls.from_image(png_start + png, filename))
return grfs | Filename is 1-8 alphanumeric characters to identify the GRF in ZPL.
Dimensions and DPI are for a typical 4"x6" shipping label.
E.g. 432 points / 72 points per inch = 6 inches, rendered at 203 dpi.
Using center of pixel will improve barcode quality but may decrease
the quality of some text.
use_bindings=False:
- Uses subprocess.Popen
- Forks so there is a memory spike
- Easier to setup - only needs the gs binary
use_bindings=True:
- Uses python-ghostscript
- Doesn't fork so should use less memory
- python-ghostscript is a bit buggy
- May be harder to setup - even if you have updated the gs binary
there may still be old libgs* files on your system |
382,439 | def get_people(self, user_alias=None):
user_alias = user_alias or self.api.user_alias
content = self.api.req(API_PEOPLE_HOME % user_alias).content
xml = self.api.to_xml(re.sub(b, b, content))
try:
xml_user = xml.xpath()
if not xml_user:
return None
else:
xml_user = xml_user[0]
avatar = first(xml_user.xpath())
city = first(xml_user.xpath())
city_url = first(xml_user.xpath())
text_created_at = xml_user.xpath()[1]
created_at = re.match(r, text_created_at.strip()).group()
xml_intro = first(xml.xpath())
intro = xml_intro.xpath() if xml_intro is not None else None
nickname = first(xml.xpath(), ).strip() or None
signature = first(xml.xpath())
xml_contact_count = xml.xpath()[0]
contact_count = int(re.search(r, xml_contact_count.xpath()).groups()[0])
text_rev_contact_count = xml.xpath()[0]
rev_contact_count = int(re.search(r, text_rev_contact_count.strip()).groups()[0])
return {
: user_alias,
: API_PEOPLE_HOME % user_alias,
: avatar,
: city,
: city_url,
: created_at,
: intro,
: nickname,
: signature,
: contact_count,
: rev_contact_count,
}
except Exception as e:
            self.api.logger.exception( % e) | Get user information
:param user_alias: user alias (user ID)
:return: |
382,440 | def _strOrDate(st):
if isinstance(st, string_types):
return st
elif isinstance(st, datetime):
return st.strftime()
raise PyEXception(, str(st)) | internal |
382,441 | def touch(self, connection=None):
self.create_marker_table()
if connection is None:
connection = self.connect()
connection.autocommit = True
connection.cursor().execute(
.format(marker_table=self.marker_table),
(self.update_id, self.table)
)
assert self.exists(connection) | Mark this update as complete.
IMPORTANT: if the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created. |
382,442 | def get_or_create_ec2_key_pair(name=None, verbose=1):
verbose = int(verbose)
name = name or env.vm_ec2_keypair_name
pem_path = % (env.ROLE, name)
conn = get_ec2_connection()
kp = conn.get_key_pair(name)
if kp:
print( % name)
else:
kp = conn.create_key_pair(name)
open(pem_path, ).write(kp.material)
os.system( % pem_path)
print( % name)
return pem_path | Creates and saves an EC2 key pair to a local PEM file. |
382,443 | def get(self, hash, account="*", max_transactions=100, min_confirmations=6, raw=False):
if len(hash) < 64:
txs = self._service.list_transactions(hash, account=account, max_transactions=max_transactions)
unspents = self._service.list_unspents(hash, min_confirmations=min_confirmations)
return {: txs, : unspents}
else:
return self._service.get_transaction(hash, raw=raw) | Args:
hash: can be a bitcoin address or a transaction id. If it's a
bitcoin address it will return a list of transactions up to
``max_transactions`` and a list of unspents with confirmed
transactions greater than or equal to ``min_confirmations``
account (Optional[str]): used when using the bitcoind. bitcoind
does not provide an easy way to retrieve transactions for a
single address. By using account we can retrieve transactions
for addresses in a specific account
Returns:
transaction |
382,444 | def cache_method(func=None, prefix=):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
cache_key_prefix = prefix or .format(func.__name__)
cache_key = get_cache_key(cache_key_prefix, *args, **kwargs)
if not hasattr(self, cache_key):
setattr(self, cache_key, func(self))
return getattr(self, cache_key)
return wrapper
if func is None:
return decorator
else:
return decorator(func) | Cache result of function execution into the `self` object (mostly useful in models).
Calculate cache key based on `args` and `kwargs` of the function (except `self`). |
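A usage sketch for the decorator above (the model and the expensive query are hypothetical; note that as extracted, the default for ``prefix`` and the key format string were stripped):
class Article:                                # e.g. a Django model
    @cache_method
    def comment_count(self):
        return expensive_count_query(self)    # hypothetical; runs once per instance

# The first call computes and stores the value as an attribute on self;
# subsequent calls return the stored attribute.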
382,445 | def get_schema(self):
path = os.path.join(self._get_schema_folder(), self._name + ".json")
with open(path, "rb") as file:
schema = json.loads(file.read().decode("UTF-8"))
return schema | Return the schema. |
382,446 | def export(self, path, session):
def variables_saver(variables_path):
if self._saver:
self._saver.save(
session, variables_path,
write_meta_graph=False,
write_state=False)
self._spec._export(path, variables_saver) | See `Module.export`. |
382,447 | def draw_markers(self):
self._canvas_markers.clear()
for marker in self._markers.values():
self.create_marker(marker["category"], marker["start"], marker["finish"], marker) | Draw all created markers on the TimeLine Canvas |
382,448 | def hide_routemap_holder_route_map_content_set_dampening_half_life(self, **kwargs):
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop()
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop()
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop()
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
dampening = ET.SubElement(set, "dampening")
half_life = ET.SubElement(dampening, "half-life")
half_life.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
382,449 | def reinforce(self, **kwargs):
results = reinforce_grid(
self, max_while_iterations=kwargs.get(
, 10),
copy_graph=kwargs.get(, False),
timesteps_pfa=kwargs.get(, None),
combined_analysis=kwargs.get(, False))
if not kwargs.get(, False):
self.network.results.measures =
return results | Reinforces the grid and calculates grid expansion costs.
See :meth:`edisgo.flex_opt.reinforce_grid` for more information. |
382,450 | def _send_data(self, data, start_offset, file_len):
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers[] = ( %
(start_offset, end_offset, file_len))
else:
headers[] = ( % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == :
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{: self._path_with_token}) | Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'. |
382,451 | def setName( self, name ):
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self | Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) |
382,452 | def kana2alphabet(text):
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, ).replace(, ).replace(, )
text = text.replace(, )
text = _convert(text, KANA2HEP)
while in text:
text = list(text)
tsu_pos = text.index()
if len(text) <= tsu_pos + 1:
return .join(text[:-1]) +
if tsu_pos == 0:
text[tsu_pos] =
else:
text[tsu_pos] = text[tsu_pos + 1]
text = .join(text)
    return text | Convert Hiragana to Hepburn-style alphabets
Parameters
----------
text : str
Hiragana string.
Return
------
str
Hepburn-style alphabets string.
Examples
--------
>>> print(jaconv.kana2alphabet('まみさん'))
mamisan |
382,453 | def bipartition(seq):
return [(tuple(seq[i] for i in part0_idx),
tuple(seq[j] for j in part1_idx))
for part0_idx, part1_idx in bipartition_indices(len(seq))] | Return a list of bipartitions for a sequence.
Args:
a (Iterable): The sequence to partition.
Returns:
list[tuple[tuple]]: A list of tuples containing each of the two
partitions.
Example:
>>> bipartition((1,2,3))
[((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))] |
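The helper bipartition_indices is not included in this row; a minimal sketch consistent with the doctest above enumerates bitmask splits of range(n), keeping only one of each mirrored pair:
def bipartition_indices(n):
    # masks 0 .. 2**(n-1) - 1; bit i set puts index i into the first part
    result = []
    for mask in range(2 ** max(n - 1, 0)):
        part0 = tuple(i for i in range(n) if (mask >> i) & 1)
        part1 = tuple(i for i in range(n) if not (mask >> i) & 1)
        result.append((part0, part1))
    return result

# With this helper, bipartition((1, 2, 3)) reproduces the doctest:
# [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]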
382,454 | def get_all_fields(self, arr):
for k, v in self.fields.items():
arr.append(v)
if self.extends:
parent = self.contract.get(self.extends)
if parent:
return parent.get_all_fields(arr)
return arr | Returns a list containing this struct's fields and all the fields of
its ancestors. Used during validation. |
382,455 | def _xfs_info_get_kv(serialized):
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
return [tuple(items.split("=")) for items in opt] | Parse one line of the XFS info output. |
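For instance, feeding the parser above one hypothetical xfs_info line shows how valueless tokens are folded into the preceding key=value pair:
line = 'meta-data=/dev/vda1 isize=256 agcount=4, agsize=65536 blks'
print(_xfs_info_get_kv(line))
# [('meta-data', '/dev/vda1'), ('isize', '256'), ('agcount', '4,'), ('agsize', '65536 blks')]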
382,456 | def delist(target):
result = target
if type(target) is dict:
for key in target:
target[key] = delist(target[key])
if type(target) is list:
if len(target)==0:
result = None
elif len(target)==1:
result = delist(target[0])
else:
result = [delist(e) for e in target]
return result | for any "list" found, replace with a single entry if the list has exactly one entry |
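A few worked calls for the function above:
print(delist([42]))                      # -> 42    (single-entry list collapses)
print(delist([]))                        # -> None  (empty list becomes None)
print(delist({'a': [1], 'b': [1, 2]}))   # -> {'a': 1, 'b': [1, 2]}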
382,457 | def add_oxidation_state_by_guess(self, **kwargs):
oxid_guess = self.composition.oxi_state_guesses(**kwargs)
oxid_guess = oxid_guess or \
[dict([(e.symbol, 0) for e in self.composition])]
self.add_oxidation_state_by_element(oxid_guess[0]) | Decorates the structure with oxidation state, guessing
using Composition.oxi_state_guesses()
Args:
**kwargs: parameters to pass into oxi_state_guesses() |
382,458 | def dictlist_replace(dict_list: Iterable[Dict], key: str, value: Any) -> None:
for d in dict_list:
d[key] = value | Process an iterable of dictionaries. For each dictionary ``d``, change
(in place) ``d[key]`` to ``value``. |
382,459 | def run(host=, port=5000, reload=True, debug=True):
from werkzeug.serving import run_simple
app = bootstrap.get_app()
return run_simple(
hostname=host,
port=port,
application=app,
use_reloader=reload,
use_debugger=debug,
) | Run development server |
382,460 | def get_documents(self):
return self.session.query(Document).order_by(Document.name).all() | Return all the parsed ``Documents`` in the database.
:rtype: A list of all ``Documents`` in the database ordered by name. |
382,461 | def display_widgets(self):
for child in self.children:
if child.displayable:
if self.layout != "grid":
child.tk.pack_forget()
else:
child.tk.grid_forget()
if child.visible:
if self.layout != "grid":
self._pack_widget(child)
else:
self._grid_widget(child) | Displays all the widgets associated with this Container.
Should be called when the widgets need to be "re-packed/gridded". |
382,462 | def remove(self, docid):
docid = int(docid)
self.store.executeSQL(self.removeSQL, (docid,)) | Remove a document from the database. |
382,463 | def _id(self):
result =
while self.char is not None and (self.char.isalnum() or self.char == ):
result += self.char
self.advance()
token = RESERVED_KEYWORDS.get(result, Token(Nature.ID, result))
        return token | Handle identifiers and reserved keywords. |
382,464 | def surface_state(num_lat=90,
num_lon=None,
water_depth=10.,
T0=12.,
T2=-40.):
if num_lon is None:
sfc = domain.zonal_mean_surface(num_lat=num_lat,
water_depth=water_depth)
else:
sfc = domain.surface_2D(num_lat=num_lat,
num_lon=num_lon,
water_depth=water_depth)
if in sfc.axes:
lon, lat = np.meshgrid(sfc.axes[].points, sfc.axes[].points)
else:
lat = sfc.axes[].points
sinphi = np.sin(np.deg2rad(lat))
initial = T0 + T2 * legendre.P2(sinphi)
Ts = Field(initial, domain=sfc)
state = AttrDict()
state[] = Ts
return state | Sets up a state variable dictionary for a surface model
(e.g. :class:`~climlab.model.ebm.EBM`) with a uniform slab ocean depth.
The domain is either 1D (latitude) or 2D (latitude, longitude)
depending on whether the input argument num_lon is supplied.
Returns a single state variable `Ts`, the temperature of the surface
mixed layer (slab ocean).
The temperature is initialized to a smooth equator-to-pole shape given by
.. math::
T(\phi) = T_0 + T_2 P_2(\sin\phi)
where :math:`\phi` is latitude, and :math:`P_2` is the second Legendre
polynomial :class:`~climlab.utils.legendre.P2`.
**Function-call arguments** \n
:param int num_lat: number of latitude points [default: 90]
:param int num_lon: (optional) number of longitude points [default: None]
:param float water_depth: depth of the slab ocean in meters [default: 10.]
:param float T0: global-mean initial temperature in :math:`^{\circ} \\textrm{C}` [default: 12.]
:param float T2: 2nd Legendre coefficient for equator-to-pole gradient in
initial temperature, in :math:`^{\circ} \\textrm{C}` [default: -40.]
:returns: dictionary with temperature
:class:`~climlab.domain.field.Field`
for surface mixed layer ``Ts``
:rtype: dict
:Example:
::
>>> from climlab.domain import initial
>>> import numpy as np
>>> T_dict = initial.surface_state(num_lat=36)
>>> print np.squeeze(T_dict['Ts'])
[-27.88584094 -26.97777479 -25.18923361 -22.57456133 -19.21320344
-15.20729309 -10.67854785 -5.76457135 -0.61467228 4.61467228
9.76457135 14.67854785 19.20729309 23.21320344 26.57456133
29.18923361 30.97777479 31.88584094 31.88584094 30.97777479
29.18923361 26.57456133 23.21320344 19.20729309 14.67854785
9.76457135 4.61467228 -0.61467228 -5.76457135 -10.67854785
-15.20729309 -19.21320344 -22.57456133 -25.18923361 -26.97777479
-27.88584094] |
382,465 | def shift(func, *args, **kwargs):
@wraps(func)
def wrapped(x):
return func(x, *args, **kwargs)
return wrapped | This function is basically a beefed up lambda x: func(x, *args, **kwargs)
:func:`shift` comes in handy when it is used in a pipeline with a function that
needs the passed value as its first argument.
:param func: a function
:param args: objects
:param kwargs: keywords
>>> def div(x, y): return float(x) / y
This is equivalent to div(42, 2)::
>>> shift(div, 2)(42)
21.0
which is different from div(2, 42)::
>>> from functools import partial
>>> partial(div, 2)(42)
0.047619047619047616 |
382,466 | def task(**kwargs):
def wrapper(wrapped):
def callback(scanner, name, obj):
celery_app = scanner.config.registry.celery_app
celery_app.task(**kwargs)(obj)
venusian.attach(wrapped, callback)
return wrapped
return wrapper | A function task decorator used in place of ``@celery_app.task``. |
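A usage sketch for the decorator above (the task options are assumptions; any keyword accepted by ``celery_app.task`` can be passed through):
@task(bind=True, max_retries=3)   # forwarded to celery_app.task(...)
def send_email(self, address):
    pass

# The task is only registered when a venusian scanner whose config carries
# registry.celery_app scans the defining module.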
382,467 | def import_keyset(self, keyset):
try:
jwkset = json_decode(keyset)
except Exception:
raise InvalidJWKValue()
if not in jwkset:
raise InvalidJWKValue()
for k, v in iteritems(jwkset):
if k == :
for jwk in v:
self[].add(JWK(**jwk))
else:
self[k] = v | Imports a RFC 7517 keyset using the standard JSON format.
:param keyset: The RFC 7517 representation of a JOSE Keyset. |
382,468 | def integratedAutocorrelationTime(A_n, B_n=None, fast=False, mintime=3):
g = statisticalInefficiency(A_n, B_n, fast, mintime)
tau = (g - 1.0) / 2.0
return tau | Estimate the integrated autocorrelation time.
See Also
--------
statisticalInefficiency |
382,469 | def consulta(self, endereco, primeiro=False,
uf=None, localidade=None, tipo=None, numero=None):
if uf is None:
url =
data = {
: endereco.encode(),
: ,
: ,
: 1,
: ,
: ,
: ,
:
}
else:
url =
data = {
: endereco.encode(),
: uf,
: tipo,
: localidade.encode(),
: numero,
: 1,
: ,
: ,
: ,
:
}
h = self._url_open(url, data)
html = h.read()
if primeiro:
return self.detalhe()
else:
            return self._parse_tabela(html) | Queries the site and returns the list of results |
382,470 | def _update_task(self, task):
self.task = task
self.task.data.update(self.task_data)
self.task_type = task.task_spec.__class__.__name__
self.spec = task.task_spec
self.task_name = task.get_name()
self.activity = getattr(self.spec, , )
self._set_lane_data() | Assigns current task step to self.task
then updates the task's data with self.task_data
Args:
task: Task object. |
382,471 | def _set_overlay_service_policy(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=overlay_service_policy.overlay_service_policy, is_container=, presence=False, yang_name="overlay-service-policy", rest_name="overlay-service-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__overlay_service_policy = t
if hasattr(self, ):
self._set() | Setter method for overlay_service_policy, mapped from YANG variable /overlay_gateway/overlay_service_policy (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_service_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_service_policy() directly. |
382,472 | def _process_using_meta_feature_generator(self, X, meta_feature_generator):
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out | Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner |
382,473 | def extract_input(pipe_def=None, pipe_generator=None):
if pipe_def:
pyinput = gen_input(pipe_def)
elif pipe_generator:
pyinput = pipe_generator(Context(describe_input=True))
else:
raise Exception()
return sorted(list(pyinput)) | Extract inputs required by a pipe |
382,474 | def validate_units(self):
if (not isinstance(self.waveunits, units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit" % self.waveunits)
if (not isinstance(self.fluxunits, units.FluxUnits)):
raise TypeError("%s is not a valid FluxUnit" % self.fluxunits) | Ensure that wavelenth and flux units belong to the
correct classes.
Raises
------
TypeError
Wavelength unit is not `~pysynphot.units.WaveUnits` or
flux unit is not `~pysynphot.units.FluxUnits`. |
382,475 | def clear(self):
self.io.seek(0)
self.io.truncate()
for item in self.monitors:
item[2] = 0 | Removes all data from the buffer. |
382,476 | def propertyWidgetMap(self):
out = {}
scaffold = self.scaffold()
for widget in self.findChildren(QtGui.QWidget):
propname = unwrapVariant(widget.property())
if not propname: continue
prop = scaffold.property(propname)
if not prop: continue
out[prop] = widget
return out | Returns the mapping for this page between its widgets and its
scaffold property.
:return {<projex.scaffold.Property>: <QtGui.QWidget>, ..} |
382,477 | def directory_open(self, path, filter_p, flags):
if not isinstance(path, basestring):
raise TypeError("path can only be an instance of type basestring")
if not isinstance(filter_p, basestring):
raise TypeError("filter_p can only be an instance of type basestring")
if not isinstance(flags, list):
raise TypeError("flags can only be an instance of type list")
for a in flags[:10]:
if not isinstance(a, DirectoryOpenFlag):
raise TypeError(
"array can only contain objects of type DirectoryOpenFlag")
directory = self._call("directoryOpen",
in_p=[path, filter_p, flags])
directory = IGuestDirectory(directory)
return directory | Opens a directory in the guest and creates a :py:class:`IGuestDirectory`
object that can be used for further operations.
This method follows symbolic links by default at the moment; this
may change in the future.
in path of type str
Path to the directory to open. Guest path style.
in filter_p of type str
Optional directory listing filter to apply. This uses the DOS/NT
style wildcard characters '?' and '*'.
in flags of type :class:`DirectoryOpenFlag`
Zero or more :py:class:`DirectoryOpenFlag` flags.
return directory of type :class:`IGuestDirectory`
:py:class:`IGuestDirectory` object containing the opened directory.
raises :class:`VBoxErrorObjectNotFound`
Directory to open was not found.
raises :class:`VBoxErrorIprtError`
Error while opening the directory.
raises :class:`VBoxErrorMaximumReached`
The maximum of concurrent guest directories has been reached. |
382,478 | def parse_date(datestring):
datestring = str(datestring).strip()
if not datestring[0].isdigit():
raise ParseError()
if in datestring.upper():
try:
datestring = datestring[:-1] + str(int(datestring[-1:]) -1)
except:
pass
for regex, pattern in DATE_FORMATS:
if regex.match(datestring):
found = regex.search(datestring).groupdict()
dt = datetime.utcnow().strptime(found[], pattern)
if in found and found[] is not None:
dt = dt.replace(microsecond=int(found[][1:]))
if in found and found[] is not None:
dt = dt.replace(tzinfo=Timezone(found.get(, )))
return dt
    return parse_time(datestring) | Attempts to parse an ISO8601 formatted ``datestring``.
Returns a ``datetime.datetime`` object. |
382,479 | def makedirs(path, ignore_extsep=False):
if not ignore_extsep and op.basename(path).find(os.extsep) > -1:
path = op.dirname(path)
try:
os.makedirs(path)
except:
return False
return True | Makes all directories required for given path; returns true if successful
and false otherwise.
**Examples**:
::
auxly.filesys.makedirs("bar/baz") |
382,480 | def cross_origin(app, *args, **kwargs):
_options = kwargs
_real_decorator = cors.decorate(app, *args, run_middleware=False, with_context=False, **kwargs)
def wrapped_decorator(f):
spf = SanicPluginsFramework(app)
try:
plugin = spf.register_plugin(cors, skip_reg=True)
except ValueError as e:
assert e.args and len(e.args) > 1
plugin = e.args[1]
context = cors.get_context_from_spf(spf)
log = context.log
log(logging.DEBUG, "Enabled {:s} for cross_origin using options: {}".format(str(f), str(_options)))
return _real_decorator(f)
return wrapped_decorator | This function is the decorator which is used to wrap a Sanic route with.
In the simplest case, simply use the default parameters to allow all
origins in what is the most permissive configuration. If this method
modifies state or performs authentication which may be brute-forced, you
should add some degree of protection, such as Cross Site Request
Forgery protection.
:param origins:
The origin, or list of origins to allow requests from.
The origin(s) may be regular expressions, case-sensitive strings,
or else an asterisk
Default : '*'
:type origins: list, string or regex
:param methods:
The method or list of methods which the allowed origins are allowed to
access for non-simple requests.
Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
:type methods: list or string
:param expose_headers:
The header or list which are safe to expose to the API of a CORS API
specification.
Default : None
:type expose_headers: list or string
:param allow_headers:
The header or list of header field names which can be used when this
resource is accessed by allowed origins. The header(s) may be regular
expressions, case-sensitive strings, or else an asterisk.
Default : '*', allow all headers
:type allow_headers: list, string or regex
:param supports_credentials:
Allows users to make authenticated requests. If true, injects the
`Access-Control-Allow-Credentials` header in responses. This allows
cookies and credentials to be submitted across domains.
:note: This option cannot be used in conjunction with a '*' origin
Default : False
:type supports_credentials: bool
:param max_age:
The maximum time for which this CORS request may be cached. This value
is set as the `Access-Control-Max-Age` header.
Default : None
:type max_age: timedelta, integer, string or None
:param send_wildcard: If True, and the origins parameter is `*`, a wildcard
`Access-Control-Allow-Origin` header is sent, rather than the
request's `Origin` header.
Default : False
:type send_wildcard: bool
:param vary_header:
If True, the header Vary: Origin will be returned as per the W3
implementation guidelines.
Setting this header when the `Access-Control-Allow-Origin` is
dynamically generated (e.g. when there is more than one allowed
origin, and an Origin than '*' is returned) informs CDNs and other
caches that the CORS headers are dynamic, and cannot be cached.
If False, the Vary header will never be injected or altered.
Default : True
:type vary_header: bool
:param automatic_options:
Only applies to the `cross_origin` decorator. If True, Sanic-CORS will
override Sanic's default OPTIONS handling to return CORS headers for
OPTIONS requests.
Default : True
:type automatic_options: bool |
382,481 | def match(self, *args):
self.fall = self.fall or not args
self.fall = self.fall or (self.value in args)
return self.fall | Whether or not to enter a given case statement |
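match is one half of the classic 'switch' recipe; the enclosing class is not shown in this row, but the conventional form it pairs with looks like this (a sketch based on the standard recipe, not confirmed by the source):
class switch(object):
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        yield self.match          # expose match() to the for-loop body
    def match(self, *args):       # identical to the method above
        self.fall = self.fall or not args
        self.fall = self.fall or (self.value in args)
        return self.fall

for case in switch('b'):
    if case('a'):
        print('was a'); break
    if case('b'):
        print('was b'); break     # prints 'was b'
    if case():                    # no args -> default, always matches
        print('default')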
382,482 | def add_to_context(self, name, **attrs):
context = self.get_context(name=name)
attrs_ = context[]
attrs_.update(**attrs) | Add attributes to a context. |
382,483 | def find_by(cls, parent=None, **attributes):
all_nones = not all(attributes.values())
if not attributes or all_nones:
raise cls.ResourceError()
matches = cls.filter(parent, **attributes)
if matches:
return matches[0] | Gets the first resource of the given type and parent (if provided) with matching attributes.
This will trigger an api GET request.
:param parent ResourceBase: the parent of the resource - used for nesting the request url, optional
:param **attributes: any number of keyword arguments as attributes to search the resource by
:returns: the matching resource, None if not found
:raises ResourceError: if no valid attributes are provided |
382,484 | def get_history_kline(self,
code,
start=None,
end=None,
ktype=KLType.K_DAY,
autype=AuType.QFQ,
fields=[KL_FIELD.ALL]):
return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end,
                                            ktype=ktype, autype=autype, fields=fields) | Get local historical K-line data; the K-lines must first be downloaded as described in the help documentation.
:param code: stock code
:param start: start date, e.g. '2017-06-20'
:param end: end date, e.g. '2017-06-30'
The combinations of start and end behave as follows:
========== ========== ========================================
start type end type   description
========== ========== ========================================
str        str        start and end are the specified dates
None       str        start is 365 days before end
str        None       end is 365 days after start
None       None       end is the current date, start is 365 days before end
========== ========== ========================================
:param ktype: K-line type, see the KLType definition
:param autype: price adjustment type, see the AuType definition
:param fields: list of fields to return, see the KL_FIELD definition: KL_FIELD.ALL, KL_FIELD.OPEN, ...
:return: (ret, data)
ret == RET_OK returns a pd.DataFrame whose columns are described below
ret != RET_OK returns an error string
================= =========== ==============================================================================
field             type        description
================= =========== ==============================================================================
code              str         stock code
time_key          str         K-line timestamp
open              float       opening price
close             float       closing price
high              float       highest price
low               float       lowest price
pe_ratio          float       price-to-earnings ratio (a ratio field; the % sign is not shown by default)
turnover_rate     float       turnover rate
volume            int         trading volume
turnover          float       trading value
change_rate       float       change rate
last_close        float       previous closing price
================= =========== ==============================================================================
:example:
.. code:: python
from futuquant import *
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22'))
quote_ctx.close() |
382,485 | def bgblack(cls, string, auto=False):
return cls.colorize(, string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color |
382,486 | def reporter(self):
logging.info(.format(self.analysistype))
make_path(self.reportpath)
header =
data =
for sample in self.metadata:
try:
data += .format(sample.name,
sample[self.analysistype].closestrefseqgenus,
sample[self.analysistype].closestrefseq,
sample[self.analysistype].mashdistance,
sample[self.analysistype].pvalue,
sample[self.analysistype].nummatches)
except AttributeError:
data += .format(sample.name)
reportfile = os.path.join(self.reportpath, )
with open(reportfile, ) as report:
report.write(header)
report.write(data) | Create the MASH report |
382,487 | def stats(self, index=None, metric=None, params=None):
return self.transport.perform_request(
"GET", _make_path(index, "_stats", metric), params=params
) | Retrieve statistics on different operations happening on an index.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg metric: Limit the information returned the specific metrics.
:arg completion_fields: A comma-separated list of fields for `fielddata`
and `suggest` index metric (supports wildcards)
:arg fielddata_fields: A comma-separated list of fields for `fielddata`
index metric (supports wildcards)
:arg fields: A comma-separated list of fields for `fielddata` and
`completion` index metric (supports wildcards)
:arg groups: A comma-separated list of search groups for `search` index
metric
:arg include_segment_file_sizes: Whether to report the aggregated disk
usage of each one of the Lucene index files (only applies if segment
stats are requested), default False
:arg level: Return stats aggregated at cluster, index or shard level,
default 'indices', valid choices are: 'cluster', 'indices', 'shards'
:arg types: A comma-separated list of document types for the `indexing`
index metric |
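A minimal usage sketch with the `elasticsearch-py` client (host and index name are hypothetical):
.. code:: python

    from elasticsearch import Elasticsearch

    es = Elasticsearch(['http://localhost:9200'])
    # Restrict the response to document and store metrics for one index;
    # leave index unset to cover all indices.
    resp = es.indices.stats(index='my-index', metric='docs,store')
    print(resp['_all']['primaries']['docs']['count'])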
382,488 | def _create_archive(self):
try:
            self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
            self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
            t = tarfile.open(self.archive_path, 'w:gz')
            for dirpath, dirnames, filenames in os.walk(self.dir_path):
                for f in filenames:
                    f_full = os.path.join(dirpath, f)
                    f_archive = f_full.replace(self.report_dir, '')
                    self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
                    t.add(f_full, arcname=f_archive)
        except Exception as e:
            self.logger.exception(e)
            raise Exception('CreateArchiveError: Unable to create archive')
        self._clean_up()
        self.logger.info('Archiving Complete')
        self.logger.con_out('SOSCleaner Complete')
        if not self.quiet:
            t.add(self.logfile, arcname=self.logfile.replace(self.report_dir, ''))
t.close() | This will create a tar.gz compressed archive of the scrubbed directory |
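The same walk-and-add pattern, reduced to a standalone stdlib sketch (paths are hypothetical):
.. code:: python

    import os
    import tarfile

    def archive_tree(src_dir, out_path, prefix_to_strip):
        # Compress every file under src_dir, storing names relative to prefix_to_strip.
        with tarfile.open(out_path, 'w:gz') as t:
            for dirpath, dirnames, filenames in os.walk(src_dir):
                for name in filenames:
                    full = os.path.join(dirpath, name)
                    t.add(full, arcname=full.replace(prefix_to_strip, ''))

    archive_tree('/tmp/scrubbed/session1', '/tmp/session1.tar.gz', '/tmp/scrubbed')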
382,489 | def _parse_line(sep, line):
strs = line.split(sep, 1)
return (strs[0].strip(), None) if len(strs) == 1 else (strs[0].strip(), strs[1].strip()) | Parse a grub commands/config with format: cmd{sep}opts
Returns: (name, value): value can be None |
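For illustration, the split-once semantics on a few hypothetical grub-style lines:
.. code:: python

    assert _parse_line('=', 'timeout=5') == ('timeout', '5')
    assert _parse_line('=', 'quiet') == ('quiet', None)                 # no separator
    assert _parse_line('=', 'root=UUID=abcd') == ('root', 'UUID=abcd')  # only the first '=' splits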
382,490 | def load_shapefile(self, feature_type, base_path):
        path = '%s.shp' % base_path
        if not os.path.exists(path):
            message = self.tr(
                '%s does not exist. The server does not have any data for '
                'this extent.' % path)
            raise FileMissingError(message)
        self.iface.addVectorLayer(path, feature_type, 'ogr') | Load the downloaded shapefile into the QGIS main window.
TODO: This is cut & paste from OSM - refactor to have one method
:param feature_type: What kind of features should be downloaded.
Currently 'buildings', 'building-points' or 'roads' are supported.
:type feature_type: str
:param base_path: The base path of the shape file (without extension).
:type base_path: str
:raises: FileMissingError - when the shapefile (e.g. buildings.shp) does not exist
382,491 | def outline(self, face_ids=None, **kwargs):
from .path.exchange.misc import faces_to_path
from .path.exchange.load import _create_path
path = _create_path(**faces_to_path(self,
face_ids,
**kwargs))
return path | Given a list of face indexes, find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
----------
face_ids : (n,) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline |
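A short usage sketch, assuming the `trimesh` library where this method is defined on `Trimesh`:
.. code:: python

    import trimesh

    # A watertight box has an empty outline, so open the mesh first by
    # masking out two triangles (one quad face).
    mesh = trimesh.creation.box()
    mesh.update_faces([True] * (len(mesh.faces) - 2) + [False, False])
    path = mesh.outline()  # Path3D tracing edges owned by a single triangle
    print(path.length)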
382,492 | def optimize(self, x0, f=None, df=None, f_df=None):
if len(self.bounds) == 1:
raise IndexError("CMA does not work in problems of dimension 1.")
try:
import cma
def CMA_f_wrapper(f):
def g(x):
return f(np.array([x]))
return g
lB = np.asarray(self.bounds)[:,0]
uB = np.asarray(self.bounds)[:,1]
x = cma.fmin(CMA_f_wrapper(f), x0, 0.6, options={"bounds":[lB, uB], "verbose":-1})[0]
return np.atleast_2d(x), f(np.atleast_2d(x))
except ImportError:
print("Cannot find cma library, please install it to use this option.")
raise | :param x0: initial point for a local optimizer.
:param f: function to optimize.
:param df: gradient of the function to optimize.
:param f_df: returns both the function to optimize and its gradient. |
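For reference, the underlying `cma.fmin` call pattern that the wrapper assembles (toy objective; all values are illustrative):
.. code:: python

    import numpy as np
    import cma

    def sphere(x):
        return float(np.sum((np.asarray(x) - 0.5) ** 2))

    bounds = [[0.0, 0.0], [1.0, 1.0]]  # [lower, upper] per dimension
    res = cma.fmin(sphere, x0=[0.2, 0.8], sigma0=0.6,
                   options={'bounds': bounds, 'verbose': -1})
    print(res[0])  # best point found, close to [0.5, 0.5]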
382,493 | def kruskal(dv=None, between=None, data=None, detailed=False,
export_filename=None):
from scipy.stats import chi2, rankdata, tiecorrect
    _check_dataframe(dv=dv, between=between, data=data,
                     effects='between')
    data = data.dropna()
    data = data.reset_index(drop=True)
    groups = list(data[between].unique())
    n_groups = len(groups)
    n = data[dv].size
    data['rank'] = rankdata(data[dv])
    grp = data.groupby(between)['rank']
    sum_rk_grp = grp.sum().values
    n_per_grp = grp.count().values
    H = (12 / (n * (n + 1)) * np.sum(sum_rk_grp**2 / n_per_grp)) - 3 * (n + 1)
    H /= tiecorrect(data['rank'].values)
    ddof1 = n_groups - 1
    p_unc = chi2.sf(H, ddof1)
    stats = pd.DataFrame({'Source': between,
                          'ddof1': ddof1,
                          'H': np.round(H, 3),
                          'p-unc': p_unc,
                          }, index=['Kruskal'])
    col_order = ['Source', 'ddof1', 'H', 'p-unc']
    stats = stats.reindex(columns=col_order)
    stats.dropna(how='all', axis=1, inplace=True)
if export_filename is not None:
_export_table(stats, export_filename)
return stats | Kruskal-Wallis H-test for independent samples.
Parameters
----------
dv : string
Name of column containing the dependent variable.
between : string
Name of column containing the between factor.
data : pandas DataFrame
DataFrame
export_filename : string
Filename (without extension) for the output file.
If None, do not export the table.
By default, the file will be created in the current python console
directory. To change that, specify the filename with full path.
Returns
-------
stats : DataFrame
Test summary ::
'H' : The Kruskal-Wallis H statistic, corrected for ties
'p-unc' : Uncorrected p-value
'dof' : degrees of freedom
Notes
-----
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
NaN values are automatically removed.
Examples
--------
Compute the Kruskal-Wallis H-test for independent samples.
>>> from pingouin import kruskal, read_dataset
>>> df = read_dataset('anova')
>>> kruskal(dv='Pain threshold', between='Hair color', data=df)
Source ddof1 H p-unc
Kruskal Hair color 3 10.589 0.014172 |
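For reference, the statistic assembled in the code above (before the tie correction divides it) is:
.. math::

    H = \frac{12}{n(n+1)} \sum_{j=1}^{k} \frac{R_j^2}{n_j} - 3(n+1)

where :math:`k` is the number of groups, :math:`R_j` the rank sum of group :math:`j`, and :math:`n_j` its size.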
382,494 | def mod_repo(repo, **kwargs):
    repos = list_repos()
    found = False
    uri = ''
    if 'uri' in kwargs:
        uri = kwargs['uri']
    for repository in repos:
        source = repos[repository][0]
        if source['name'] == repo:
            found = True
            repostr = ''
            if 'enabled' in kwargs and not kwargs['enabled']:
                repostr += '# '
            if 'compressed' in kwargs:
                repostr += 'src/gz ' if kwargs['compressed'] else 'src '
            else:
                repostr += 'src/gz ' if source['compressed'] else 'src '
            repo_alias = kwargs['alias'] if 'alias' in kwargs else repo
            if ' ' in repo_alias:
                repostr += '"{0}" '.format(repo_alias)
            else:
                repostr += '{0} '.format(repo_alias)
            repostr += '{0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri'])
            trusted = kwargs.get('trusted')
            repostr = _set_trusted_option_if_needed(repostr, trusted) if trusted is not None else \
                _set_trusted_option_if_needed(repostr, source.get('trusted'))
            _mod_repo_in_file(repo, repostr, source['file'])
        elif uri and source['uri'] == uri:
            raise CommandExecutionError(
                'Repository \'{0}\' already exists as \'{1}\'.'.format(uri, source['name']))
    if not found:
        # The repo does not exist; create it, provided a URI was given
        if 'uri' not in kwargs:
            raise CommandExecutionError(
                'Repository \'{0}\' not found and no URI given to create one.'.format(repo))
        properties = {'uri': kwargs['uri']}
        properties['compressed'] = kwargs['compressed'] if 'compressed' in kwargs else True
        properties['enabled'] = kwargs['enabled'] if 'enabled' in kwargs else True
        properties['trusted'] = kwargs.get('trusted')
        _add_new_repo(repo, properties)
    if 'refresh' in kwargs:
refresh_db() | Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as uri is defined.
The following options are available to modify a repo definition:
repo
alias by which opkg refers to the repo.
uri
the URI to the repo.
compressed
defines (True or False) if the index file is compressed
enabled
enable or disable (True or False) repository
but do not remove if disabled.
refresh
enable or disable (True or False) auto-refresh of the repositories
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo repo uri=http://new/uri
salt '*' pkg.mod_repo repo enabled=False |
382,495 | def wiki_list(self, title=None, creator_id=None, body_matches=None,
other_names_match=None, creator_name=None, hide_deleted=None,
other_names_present=None, order=None):
        params = {
            'search[title]': title,
            'search[creator_id]': creator_id,
            'search[body_matches]': body_matches,
            'search[other_names_match]': other_names_match,
            'search[creator_name]': creator_name,
            'search[hide_deleted]': hide_deleted,
            'search[other_names_present]': other_names_present,
            'search[order]': order
        }
        return self._get('wiki_pages.json', params) | Function to retrieve a list of every wiki page.
Parameters:
title (str): Page title.
creator_id (int): Creator id.
body_matches (str): Page content.
other_names_match (str): Other names.
creator_name (str): Creator name.
hide_deleted (str): Can be: yes, no.
other_names_present (str): Can be: yes, no.
order (str): Can be: date, title. |
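A hedged usage sketch, assuming this method belongs to a `pybooru`-style Danbooru client:
.. code:: python

    from pybooru import Danbooru  # assumed client package

    client = Danbooru('danbooru')
    pages = client.wiki_list(title='help:*', order='title')
    for page in pages:
        print(page['id'], page['title'])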
382,496 | def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:
return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}} | Utility method to create a default options structure with the lazy parsing option set
:param lazy_mfcollection_parsing: whether multifile collections should be parsed lazily
:return: the options structure filled with the lazy parsing option (for the MultifileCollectionParser)
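A usage sketch; `parse_collection`, the `options` keyword, and the 'lazy_parsing' key reconstructed above follow the parsyfiles-style API and are assumptions here:
.. code:: python

    import pandas as pd

    # Enable lazy parsing of multifile collections, then hand the options
    # structure to a parse call (paths and types are illustrative).
    opts = create_parser_options(lazy_mfcollection_parsing=True)
    frames = parse_collection('./test_data/demo', pd.DataFrame, options=opts)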
382,497 | def getSpanDurations(self, time_stamp, service_name, rpc_name):
self.send_getSpanDurations(time_stamp, service_name, rpc_name)
return self.recv_getSpanDurations() | Given a time stamp, server service name, and rpc name, fetch all of the client services calling in, paired
with the lists of every span duration (list<i64>) from the server to client. The lists of span durations
include information on call counts and mean/stdDev/etc. of call durations.
The three arguments specify epoch time in microseconds, server-side service name and rpc name. The returned map
contains the key - client_service_name and value - list<span_durations>.
Parameters:
- time_stamp
- service_name
- rpc_name |
382,498 | def paramtypes(self):
for m in [p[1] for p in self.ports]:
for p in [p[1] for p in m]:
for pd in p:
if pd[1] in self.params:
continue
item = (pd[1], pd[1].resolve())
self.params.append(item) | get all parameter types |
382,499 | def unquote(str):
if len(str) > 1:
        if str.startswith('"') and str.endswith('"'):
            return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        if str.startswith('<') and str.endswith('>'):
            return str[1:-1]
return str | Remove quotes from a string. |
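Behavior on a few sample inputs:
.. code:: python

    assert unquote('"a \\"quoted\\" token"') == 'a "quoted" token'
    assert unquote('<user@example.com>') == 'user@example.com'
    assert unquote('bare') == 'bare'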