Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
27,400 |
def drape(raster, feature):
    coords = feature['geometry']['coordinates']
    geom_type = feature['geometry']['type']
    if geom_type == 'Point':
        xyz = sample(raster, [coords])
        result = Point(xyz[0])
    elif geom_type == 'LineString':
        xyz = sample(raster, coords)
        points = [Point(x, y, z) for x, y, z in xyz]
        result = LineString(points)
    else:
        logging.error('Unsupported geometry type: {}'.format(geom_type))
        result = None
    return result
|
Convert a 2D feature to a 3D feature by sampling a raster
Parameters:
raster (rasterio): raster to provide the z coordinate
feature (dict): fiona feature record to convert
Returns:
result (Point or LineString): shapely Point or LineString of xyz coordinate triples
|
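A minimal usage sketch, assuming drape lives in a module that imports shapely's Point/LineString and provides a sample(raster, coords) helper; the stub sample and the dict-based "raster" below are stand-ins for illustration only:

    from shapely.geometry import Point, LineString

    def sample(raster, coords):
        # Stand-in sampler: look up a z value for each (x, y) pair.
        return [(x, y, raster.get((round(x), round(y)), 0.0)) for x, y in coords]

    # A fiona-style feature record (GeoJSON layout).
    feature = {"geometry": {"type": "LineString",
                            "coordinates": [(0.0, 0.0), (1.0, 2.0)]}}
    toy_raster = {(0, 0): 10.0, (1, 2): 12.5}

    line3d = drape(toy_raster, feature)
    print(list(line3d.coords))  # [(0.0, 0.0, 10.0), (1.0, 2.0, 12.5)]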
27,401 |
def get_access_id(self):
access_id = self.get_as_nullable_string("access_id")
access_id = access_id if access_id != None else self.get_as_nullable_string("client_id")
return access_id
|
Gets the application access id. The value can be stored in parameters "access_id" or "client_id"
:return: the application access id.
|
27,402 |
def is_safe(self):
if self._number == defines.OptionRegistry.URI_HOST.number \
or self._number == defines.OptionRegistry.URI_PORT.number \
or self._number == defines.OptionRegistry.URI_PATH.number \
or self._number == defines.OptionRegistry.MAX_AGE.number \
or self._number == defines.OptionRegistry.URI_QUERY.number \
or self._number == defines.OptionRegistry.PROXY_URI.number \
or self._number == defines.OptionRegistry.PROXY_SCHEME.number:
return False
return True
|
Check if the option is safe.
:rtype : bool
:return: True, if option is safe
|
27,403 |
def _build_number_type(var, property_path=None):
if not property_path:
property_path = []
schema = {"type": "number"}
if is_builtin_type(var):
return schema
if is_config_var(var):
schema.update(
_build_attribute_modifiers(var, {"min": "minimum", "max": "maximum"})
)
return schema
|
Builds schema definitions for number type values.
:param var: The number type value
:param List[str] property_path: The property path of the current type,
    defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
|
27,404 |
def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None):
r
if x:
dTm = R*Tm**2*x/Hm
elif M and MW:
MW = MW/1000.
dTm = R*Tm**2*MW*M/Hm
else:
raise Exception()
return dTm
|
r'''Returns the freezing point depression caused by a solute in a solvent.
Can use either the mole fraction of the solute or its molality and the
molecular weight of the solvent. Assumes ideal system behavior.
.. math::
\Delta T_m = \frac{R T_m^2 x}{\Delta H_m}
\Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}
Parameters
----------
Tm : float
Melting temperature of the solute [K]
Hm : float
Heat of melting at the melting temperature of the solute [J/mol]
x : float, optional
Mole fraction of the solute [-]
M : float, optional
Molality [mol/kg]
MW: float, optional
Molecular weight of the solvent [g/mol]
Returns
-------
dTm : float
Freezing point depression [K]
Notes
-----
MW is the molecular weight of the solvent. M is the molality of the solute.
Examples
--------
From [1]_, matching example.
>>> Tm_depression_eutectic(353.35, 19110, .02)
1.0864594900639515
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
|
27,405 |
def copy_file_content(self, file_id, source_file):
if not is_valid_uuid(file_id):
raise StorageArgumentException(
.format(file_id))
if not is_valid_uuid(source_file):
raise StorageArgumentException(
.format(source_file))
self._authenticated_request \
.to_endpoint(.format(file_id)) \
.with_headers({: source_file}) \
.put()
|
Copy file content from source file to target file.
Args:
file_id (str): The UUID of the file whose content is written.
source_file (str): The UUID of the file whose content is copied.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
27,406 |
def __prepare_resource(data):
if not data:
return None
data = unicode(data)
try:
resource = RESOURCEPREP.prepare(data)
except StringprepError, err:
raise JIDError(u"Local part invalid: {0}".format(err))
if len(resource.encode("utf-8")) > 1023:
raise JIDError("Resource name too long")
return resource
|
Prepare the resourcepart of the JID.
:Parameters:
- `data`: Resourcepart of the JID
:raise JIDError: if the resource name is too long.
:raise pyxmpp.xmppstringprep.StringprepError: if the
resourcepart fails Resourceprep preparation.
|
27,407 |
def plot_signal_sum_colorplot(ax, params, fname=, unit=, N=1, ylabels = True,
T=[800, 1000], ylim=[-1500, 0], fancy=False, colorbar=True,
cmap=, absmax=None, transient=200, rasterized=True):
f = h5py.File(fname)
data = f[].value
tvec = np.arange(data.shape[1]) * 1000. / f[].value
datameanaxis1 = f[].value[:, tvec >= transient].mean(axis=1)
slica = (tvec <= T[1]) & (tvec >= T[0])
data = data[:,slica]
dataT = data.T - datameanaxis1
data = dataT.T
data = data/N
zvec = params.electrodeParams[]
if fancy:
colors = phlp.get_colors(data.shape[0])
else:
colors = []*data.shape[0]
if absmax == None:
absmax=abs(np.array([data.max(), data.min()])).max()
im = ax.pcolormesh(tvec[slica], np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] + 50, data,
rasterized=rasterized, vmax=absmax, vmin=-absmax, cmap=cmap)
ax.set_yticks(params.electrodeParams[])
if ylabels:
yticklabels = [ %(i+1) for i in np.arange(len(params.electrodeParams[]))]
ax.set_yticklabels(yticklabels)
else:
ax.set_yticklabels([])
if colorbar:
divider=make_axes_locatable(ax)
cax=divider.append_axes("right", size="5%", pad=0.1)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label(unit,labelpad=0.1)
plt.axis()
ax.set_ylim(ylim)
f.close()
return im
|
Plot the summed signal contributions as a color plot, e.g. as a background
for the summed CSD contributions.
args:
::
    ax : matplotlib.axes.AxesSubplot object
    T : list, [tstart, tstop], time interval to plot
    ylim : list, range of the y-axis, to scale with other plots
    fancy : bool, if True use a distinct color per channel
    N : integer, number of LFP generators, used to normalize the signal
|
27,408 |
def start(self, timeout=None):
assert self.state == STOPPED, "Process already started"
self.state = STARTING
should_publish = self._start_controllers(
self._controllers.values(), timeout)
if should_publish:
self._publish_controllers(timeout)
self.state = STARTED
|
Start the process going
Args:
timeout (float): Maximum amount of time to wait for each spawned
process. None means forever
|
27,409 |
def match_description(self, description, string_match_type=DEFAULT_STRING_MATCH_TYPE, match=True):
    self._match_display_text('description', description, string_match_type, match)
|
Adds a description name to match.
Multiple description matches can be added to perform a boolean
``OR`` among them.
arg: description (string): description to match
arg: string_match_type (osid.type.Type): the string match
type
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``description`` is not of
``string_match_type``
raise: NullArgument - ``description`` or ``string_match_type``
is ``null``
raise: Unsupported -
``supports_string_match_type(string_match_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
|
27,410 |
def update_total(self, n=1):
with self._lock:
self._pbar.total += n
self.refresh()
|
Increment total pbar value.
|
27,411 |
def tds7_process_result(self):
self.log_response_message()
r = self._reader
num_cols = r.get_smallint()
if num_cols == -1:
return
self.param_info = None
self.has_status = False
self.ret_status = None
self.skipped_to_status = False
self.rows_affected = tds_base.TDS_NO_COUNT
self.more_rows = True
self.row = [None] * num_cols
self.res_info = info = _Results()
header_tuple = []
for col in range(num_cols):
curcol = tds_base.Column()
info.columns.append(curcol)
self.get_type_info(curcol)
curcol.column_name = r.read_ucs2(r.get_byte())
precision = curcol.serializer.precision
scale = curcol.serializer.scale
size = curcol.serializer.size
header_tuple.append(
(curcol.column_name,
curcol.serializer.get_typeid(),
None,
size,
precision,
scale,
curcol.flags & tds_base.Column.fNullable))
info.description = tuple(header_tuple)
return info
|
Reads and processes COLMETADATA stream
This stream contains a list of returned columns.
Stream format link: http://msdn.microsoft.com/en-us/library/dd357363.aspx
|
27,412 |
def construct(parent=None, defaults=None, **kwargs):
for key in kwargs:
assert key in LEGAL_ATTRS, "{} is not legal input".format(key)
if parent is not None:
for key, value in LEGAL_ATTRS.items():
if key not in kwargs and hasattr(parent, value):
kwargs[key] = getattr(parent, value)
assert "cdf" in kwargs, "cdf function must be defined"
assert "bnd" in kwargs, "bnd function must be defined"
if "str" in kwargs and isinstance(kwargs["str"], str):
string = kwargs.pop("str")
kwargs["str"] = lambda *args, **kwargs: string
defaults = defaults if defaults else {}
for key in defaults:
assert key in LEGAL_ATTRS, "invalid default value {}".format(key)
def custom_distribution(**kws):
prm = defaults.copy()
prm.update(kws)
dist = Dist(**prm)
for key, function in kwargs.items():
attr_name = LEGAL_ATTRS[key]
setattr(dist, attr_name, types.MethodType(function, dist))
return dist
if "doc" in kwargs:
custom_distribution.__doc__ = kwargs["doc"]
return custom_distribution
|
Random variable constructor.
Args:
cdf:
Cumulative distribution function. Optional if ``parent`` is used.
bnd:
Boundary interval. Optional if ``parent`` is used.
parent (Dist):
Distribution used as basis for new distribution. Any other argument
that is omitted will instead take is function from ``parent``.
doc (str]):
Documentation for the distribution.
str (str, :py:data:typing.Callable):
Pretty print of the variable.
pdf:
Probability density function.
ppf:
Point percentile function.
mom:
Raw moment generator.
ttr:
Three terms recursion coefficient generator.
init:
Custom initialiser method.
defaults (dict):
Default values to provide to initialiser.
Returns:
(Dist):
New custom distribution.
|
27,413 |
def _set_ldp_params(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ldp_params.ldp_params, is_container=, presence=False, yang_name="ldp-params", rest_name="ldp-params", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__ldp_params = t
if hasattr(self, ):
self._set()
|
Setter method for ldp_params, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/ldp_params (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_params is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_params() directly.
|
27,414 |
def __store_processing_state(self):
steps = self.Application_Progress_Status_processing.Processing_progressBar.maximum()
value = self.Application_Progress_Status_processing.Processing_progressBar.value()
message = self.Application_Progress_Status_processing.Processing_label.text()
state = self.__is_processing
self.__processing_state = steps, value, message, state
|
Stores the processing state.
|
27,415 |
def send_produce_request(self, payloads=(), acks=1, timeout=1000,
fail_on_error=True, callback=None):
encoder = functools.partial(
KafkaProtocol.encode_produce_request,
acks=acks,
timeout=timeout)
if acks == 0:
decoder = None
else:
decoder = KafkaProtocol.decode_produce_response
resps = self._send_broker_aware_request(payloads, encoder, decoder)
return [resp if not callback else callback(resp) for resp in resps
if resp is not None and
(not fail_on_error or not self._raise_on_response_error(resp))]
|
Encode and send some ProduceRequests
ProduceRequests will be grouped by (topic, partition) and then
sent to a specific broker. Output is a list of responses in the
same order as the list of payloads specified
Arguments:
payloads (list of ProduceRequest): produce requests to send to kafka
ProduceRequest payloads must not contain duplicates for any
topic-partition.
acks (int, optional): how many acks the servers should receive from replica
brokers before responding to the request. If it is 0, the server
will not send any response. If it is 1, the server will wait
until the data is written to the local log before sending a
response. If it is -1, the server will wait until the message
is committed by all in-sync replicas before sending a response.
For any value > 1, the server will wait for this number of acks to
occur (but the server will never wait for more acknowledgements than
there are in-sync replicas). defaults to 1.
timeout (int, optional): maximum time in milliseconds the server can
await the receipt of the number of acks, defaults to 1000.
fail_on_error (bool, optional): raise exceptions on connection and
server response errors, defaults to True.
callback (function, optional): instead of returning the ProduceResponse,
first pass it through this function, defaults to None.
Returns:
list of ProduceResponses, or callback results if supplied, in the
order of input payloads
|
27,416 |
def get_matches(self, src, src_idx):
    if src not in ('l1', 'l2'):
        raise ValueError("src must be 'l1' or 'l2'")
    if src == 'l1':
        target_list = self.l2
    else:
        target_list = self.l1
    comparator = {
        'l1': lambda s_idx, t_idx: (s_idx, t_idx) in self.matches,
        'l2': lambda s_idx, t_idx: (t_idx, s_idx) in self.matches,
    }[src]
return [(trg_idx, obj) for trg_idx, obj in enumerate(target_list)
if comparator(src_idx, trg_idx)]
|
Get elements equal to the idx'th in src from the other list.
e.g. get_matches(self, 'l1', 0) will return all elements from self.l2
matching with self.l1[0]
|
27,417 |
def SynchronizedClassMethod(*locks_attr_names, **kwargs):
locks_attr_names = [
lock_name for lock_name in locks_attr_names if lock_name
]
if not locks_attr_names:
raise ValueError("The lock names list can{0}t be None in class {1}".format(
locks_attr_names[i], type(self).__name__
)
)
i += 1
lock.acquire()
locked.appendleft(lock)
return method(self, *args, **kwargs)
finally:
for lock in locked:
lock.release()
locked.clear()
del locks[:]
return synchronized
return wrapped
|
A synchronizer decorator for class methods. An AttributeError can be raised
at runtime if the given lock attribute doesn't exist or if it is None.
If a parameter ``sorted`` is found in ``kwargs`` and its value is True,
then the list of locks names will be sorted before locking.
:param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be
used for synchronization
:return: The decorator method, surrounded with the lock
|
27,418 |
def normalizeFilePath(value):
if not isinstance(value, basestring):
raise TypeError("File paths must be strings, not %s."
% type(value).__name__)
return unicode(value)
|
Normalizes file path.
* **value** must be a :ref:`type-string`.
* Returned value is an unencoded ``unicode`` string
|
27,419 |
def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs):
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT
|
A string module that transforms a string, e.g. by swapping case. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings
conf : {'transformation': {'value': <'swapcase'>}}
Returns
-------
_OUTPUT : generator of transformed strings
|
27,420 |
def divine_format(text):
try:
nbformat.reads(text, as_version=4)
return
except nbformat.reader.NotJSONError:
pass
lines = text.splitlines()
for comment in [, ] + _COMMENT_CHARS:
metadata, _, _, _ = header_to_metadata_and_cell(lines, comment)
ext = metadata.get(, {}).get(, {}).get()
if ext:
return ext[1:] + + guess_format(text, ext)[0]
for line in lines:
if line == :
return
return + guess_format(text, )[0]
|
Guess the format of the notebook, based on its content #148
|
27,421 |
def construct_channel(self, *args, **kwargs):
channel = self.get_channel(*args, **kwargs)
city_topic = TopicNode(source_id="List_of_largest_cities", title="Cities!")
channel.add_child(city_topic)
add_subpages_from_wikipedia_list(city_topic, "https://en.wikipedia.org/wiki/List_of_largest_cities")
return channel
|
Create ChannelNode and build topic tree.
|
27,422 |
def main():
args = parse_args()
try:
s = pyhsm.base.YHSM(device=args.device, debug=args.debug)
get_entropy(s, args.iterations, args.ratio)
return 0
except pyhsm.exception.YHSM_Error as e:
sys.stderr.write("ERROR: %s" % (e.reason))
return 1
|
What will be executed when running as a stand alone program.
|
27,423 |
def generalize_sql(sql):
if sql is None:
return None
sql = re.sub(r, , sql)
sql = remove_comments_from_sql(sql)
sql = normalize_likes(sql)
sql = re.sub(r"\\\\", , sql)
sql = re.sub(r"\\\\"[^\", , sql)
sql = re.sub(r, , sql)
sql = re.sub(r, , sql)
sql = re.sub(r, , sql)
sql = re.sub(r, , sql, flags=re.IGNORECASE)
return sql.strip()
|
Removes most variables from an SQL query and replaces them with X or N for numbers.
Based on Mediawiki's DatabaseBase::generalizeSQL
:type sql str|None
:rtype: str
|
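The regular expressions in generalize_sql were lost in extraction; as a rough sketch of the same idea (not the library's actual patterns), a simplified generalizer can replace quoted strings with X and numbers with N:

    import re

    def generalize_sql_sketch(sql):
        # Simplified stand-in; the real function also strips comments and LIKE patterns.
        if sql is None:
            return None
        sql = re.sub(r"\s+", " ", sql)        # collapse whitespace
        sql = re.sub(r"'[^']*'", "X", sql)    # single-quoted strings -> X
        sql = re.sub(r'"[^"]*"', "X", sql)    # double-quoted strings -> X
        sql = re.sub(r"-?\b\d+\b", "N", sql)  # integers -> N
        return sql.strip()

    print(generalize_sql_sketch("SELECT * FROM page WHERE page_id = 1234 AND title = 'Foo'"))
    # SELECT * FROM page WHERE page_id = N AND title = X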
27,424 |
def show_rules():
from rules.loader import import_rules
from rules.rule_list import all_rules
rules = import_rules(all_rules)
print("")
for name, rule in rules.iteritems():
heading = "{} (`{}`)".format(rule.description(), name)
        print(heading)
for line in rule.reason():
print(line)
print("")
sys.exit(0)
|
Show the list of available rules and quit
:return:
|
27,425 |
def assign(self, role):
if role.owner_id != self.id:
return self.roles.add(role)
|
Assign :class:`Role` ``role`` to this :class:`Subject`. If this
:class:`Subject` is the :attr:`Role.owner`, this method does nothing.
|
27,426 |
def submit(self, code: str, results: str ="html", prompt: dict = None) -> dict:
userpwdsnameresults)
print(results[])
HTML(results[])
inlineinline;*\";*/;"
lstf = b
logf = b
bail = False
eof = 5
bc = False
done = False
logn = self._logcnt()
logcodei = "%put E3969440A681A24088859985" + logn + ";"
logcodeo = b"\nE3969440A681A24088859985" + logn.encode()
pcodei =
pcodeiv =
pcodeo =
pgm = b
if self.pid == None:
self._sb.SASpid = None
print("No SAS process attached. SAS process has terminated unexpectedly.")
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST=)
if os.name == :
try:
rc = self.pid.wait(0)
self.pid = None
self._sb.SASpid = None
return dict(LOG=+str(rc), LST=)
except:
pass
else:
if self.pid == None:
self._sb.SASpid = None
return "No SAS process attached. SAS process has terminated unexpectedly."
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[1]:
self.pid = None
self._sb.SASpid = None
return dict(LOG=+str(rc), LST=)
if results.upper() != "HTML":
ods = False
if len(prompt):
pcodei +=
pcodeo +=
for key in prompt:
gotit = False
while not gotit:
var = self.sascfg._prompt(+key+, pw=prompt[key])
if var is None:
raise KeyboardInterrupt
if len(var) > 0:
gotit = True
else:
print("Sorry, didn%let =;\n%symdel ;\n%let =;\noptions source notes;\noptions source notes;\n\n\n\n\n\ntom says EOL=\nnt\nSAS process has terminated unexpectedly. RC from wait was: replace\nSAS process has terminated unexpectedly. Pid State= replacetom says EOL=\nnt\nConnection Reset: SAS process has terminated unexpectedly. Pid State= replaceException caught!ABORTLOGLSTBCException handled :)\nException ignored, continuing to process...\ntom says EOL=\nreplacereplace%08d\n<body class="c body"><body class="l body">').replace("font-size: x-small;",
"font-size: normal;")
return dict(LOG=logd, LST=lstd)
|
This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
i.e.: results = sas.submit("data a; x=1; run; proc print; run;")
print(results['LOG'])
HTML(results['LST'])
|
27,427 |
def balanced_accuracy(y_true, y_pred):
all_classes = list(set(np.append(y_true, y_pred)))
all_class_accuracies = []
for this_class in all_classes:
this_class_sensitivity = 0.
this_class_specificity = 0.
if sum(y_true == this_class) != 0:
this_class_sensitivity = \
float(sum((y_pred == this_class) & (y_true == this_class))) /\
float(sum((y_true == this_class)))
this_class_specificity = \
float(sum((y_pred != this_class) & (y_true != this_class))) /\
float(sum((y_true != this_class)))
this_class_accuracy = (this_class_sensitivity + this_class_specificity) / 2.
all_class_accuracies.append(this_class_accuracy)
return np.mean(all_class_accuracies)
|
Default scoring function: balanced accuracy.
Balanced accuracy computes each class' accuracy on a per-class basis using a
one-vs-rest encoding, then computes an unweighted average of the class accuracies.
Parameters
----------
y_true: numpy.ndarray {n_samples}
True class labels
y_pred: numpy.ndarray {n_samples}
Predicted class labels by the estimator
Returns
-------
fitness: float
Returns a float value indicating the individual's balanced accuracy
0.5 is as good as chance, and 1.0 is perfect predictive accuracy
|
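A small usage sketch with hand-checkable numbers: for each class the accuracy is the mean of its sensitivity and specificity, and the score averages over classes:

    import numpy as np

    y_true = np.array([0, 0, 1, 1])
    y_pred = np.array([0, 1, 1, 1])

    # Class 0: sensitivity 1/2, specificity 2/2 -> 0.75
    # Class 1: sensitivity 2/2, specificity 1/2 -> 0.75
    print(balanced_accuracy(y_true, y_pred))  # 0.75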
27,428 |
def tweet(self, status, images):
    template = "%s"
    print template % status, len(template % status)
    if not images:
        self.twitter.update_status(status=template % status)
    else:
        medias = map(lambda i: self.upload_media(i), images)
        self.twitter.post('/statuses/update_with_media',
                          params={'status': template % status,
                                  'media': medias[0]})
|
|
27,429 |
def _get_element_by_names(source, names):
if source is None:
return source
else:
if names:
head, *rest = names
if isinstance(source, dict) and head in source:
return _get_element_by_names(source[head], rest)
elif isinstance(source, list) and head.isdigit():
return _get_element_by_names(source[int(head)], rest)
elif not names[0]:
pass
else:
source = None
return source
|
Given a dict and path '/' or '.' separated. Digs into de dict to retrieve
the specified element.
Args:
source (dict): set of nested objects in which the data will be searched
path (list): list of attribute names
|
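For illustration, digging into a nested structure with an already-split path (list indices are passed as digit strings):

    source = {"a": {"b": [10, 20, 30]}}

    print(_get_element_by_names(source, ["a", "b", "1"]))   # 20
    print(_get_element_by_names(source, ["a", "missing"]))  # None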
27,430 |
def tryCComment(self, block):
indentation = None
prevNonEmptyBlock = self._prevNonEmptyBlock(block)
if not prevNonEmptyBlock.isValid():
return None
prevNonEmptyBlockText = prevNonEmptyBlock.text()
if prevNonEmptyBlockText.endswith():
try:
foundBlock, notUsedColumn = self.findTextBackward(prevNonEmptyBlock, prevNonEmptyBlock.length(), )
except ValueError:
foundBlock = None
if foundBlock is not None:
dbg("tryCComment: success (1) in line %d" % foundBlock.blockNumber())
return self._lineIndent(foundBlock.text())
if prevNonEmptyBlock != block.previous():
return None
blockTextStripped = block.text().strip()
prevBlockTextStripped = prevNonEmptyBlockText.strip()
if prevBlockTextStripped.startswith() and not in prevBlockTextStripped:
indentation = self._blockIndent(prevNonEmptyBlock)
if CFG_AUTO_INSERT_STAR:
indentation +=
if not blockTextStripped.endswith():
indentation +=
secondCharIsSpace = len(blockTextStripped) > 1 and blockTextStripped[1].isspace()
if not secondCharIsSpace and \
not blockTextStripped.endswith("*/"):
indentation +=
dbg("tryCComment: success (2) in line %d" % block.blockNumber())
return indentation
elif prevBlockTextStripped.startswith() and \
(len(prevBlockTextStripped) == 1 or prevBlockTextStripped[1].isspace()):
dbg("tryCComment: success (2) in line %d" % block.blockNumber())
return indentation
return None
|
C comment checking. If the previous line begins with a "/*" or a "* ", then
return its leading white spaces + ' *' + the white spaces after the *
return: filler string or null, if not in a C comment
|
27,431 |
def parse(self, group_by_stmt):
if not group_by_stmt:
return Resolution.MAX_RESOLUTION
m = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)
if not m:
return None
value = int(m.group(1))
unit = m.group(2)
resolution = self.convert_to_seconds(value, unit)
return max(resolution, Resolution.MAX_RESOLUTION)
|
Extract the data resolution of a query in seconds
E.g. "group by time(99s)" => 99
:param group_by_stmt: A raw InfluxDB group by statement
|
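GROUP_BY_TIME_PATTERN and convert_to_seconds are not shown above; a self-contained sketch of the same idea (the pattern and unit table here are assumptions, not the original attributes) might look like:

    import re

    GROUP_BY_TIME_PATTERN = re.compile(r"group by time\((\d+)([smhdw])\)", re.IGNORECASE)
    UNIT_SECONDS = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}

    def parse_resolution(group_by_stmt):
        match = GROUP_BY_TIME_PATTERN.search(group_by_stmt or "")
        if not match:
            return None
        return int(match.group(1)) * UNIT_SECONDS[match.group(2)]

    print(parse_resolution("group by time(99s)"))  # 99
    print(parse_resolution("GROUP BY time(2m)"))   # 120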
27,432 |
def element(self, inp=None, order=None, **kwargs):
if inp is None:
return self.element_type(self, self.tspace.element(order=order))
elif inp in self and order is None:
return inp
elif inp in self.tspace and order is None:
return self.element_type(self, inp)
elif callable(inp):
        vectorized = kwargs.pop('vectorized', True)
inp_elem = self.fspace.element(inp, vectorized=vectorized)
sampled = self.sampling(inp_elem, **kwargs)
return self.element_type(
self, self.tspace.element(sampled, order=order))
else:
return self.element_type(
self, self.tspace.element(inp, order=order))
|
Create an element from ``inp`` or from scratch.
Parameters
----------
inp : optional
Input used to initialize the new element. The following options
are available:
- ``None``: an empty element is created with no guarantee of
its state (memory allocation only). The new element will
use ``order`` as storage order if provided, otherwise
`default_order`.
- array-like: an element wrapping a `tensor` is created,
where a copy is avoided whenever possible. This usually
requires correct `shape`, `dtype` and `impl` if applicable,
and if ``order`` is provided, also contiguousness in that
ordering. See the ``element`` method of `tspace` for more
information.
If any of these conditions is not met, a copy is made.
- callable: a new element is created by sampling the function
using the `sampling` operator.
order : {None, 'C', 'F'}, optional
Storage order of the returned element. For ``'C'`` and ``'F'``,
contiguous memory in the respective ordering is enforced.
The default ``None`` enforces no contiguousness.
vectorized : bool, optional
If ``True``, assume that a provided callable ``inp`` supports
vectorized evaluation. Otherwise, wrap it in a vectorizer.
Default: ``True``.
kwargs :
Additional arguments passed on to `sampling` when called
on ``inp``, in the form ``sampling(inp, **kwargs)``.
This can be used e.g. for functions with parameters.
Returns
-------
element : `DiscreteLpElement`
The discretized element, calculated as ``sampling(inp)`` or
``tspace.element(inp)``, tried in this order.
Examples
--------
Elements can be created from array-like objects that represent
an already discretized function:
>>> space = odl.uniform_discr(-1, 1, 4)
>>> space.element([1, 2, 3, 4])
uniform_discr(-1.0, 1.0, 4).element([ 1., 2., 3., 4.])
>>> vector = odl.rn(4).element([0, 1, 2, 3])
>>> space.element(vector)
uniform_discr(-1.0, 1.0, 4).element([ 0., 1., 2., 3.])
On the other hand, non-discretized objects like Python functions
can be discretized "on the fly":
>>> space.element(lambda x: x * 2)
uniform_discr(-1.0, 1.0, 4).element([-1.5, -0.5, 0.5, 1.5])
This works also with parameterized functions, however only
through keyword arguments (not positional arguments with
defaults):
>>> def f(x, c=0.0):
... return np.maximum(x, c)
...
>>> space = odl.uniform_discr(-1, 1, 4)
>>> space.element(f, c=0.5)
uniform_discr(-1.0, 1.0, 4).element([ 0.5 , 0.5 , 0.5 , 0.75])
See Also
--------
sampling : create a discrete element from a non-discretized one
|
27,433 |
def find_nearby_pores(self, pores, r, flatten=False, include_input=False):
r
pores = self._parse_indices(pores)
if sp.size(pores) == 0:
return sp.array([], dtype=sp.int64)
if r <= 0:
raise Exception()
    kd = sptl.cKDTree(self['pore.coords'])
    kd_pores = sptl.cKDTree(self['pore.coords'][pores])
Ps_within_r = kd_pores.query_ball_tree(kd, r=r)
for i in range(len(Ps_within_r)):
Ps_within_r[i].remove(pores[i])
temp = sp.concatenate((Ps_within_r))
Pn = sp.unique(temp).astype(sp.int64)
if include_input is False:
Pn = Pn[~sp.in1d(Pn, pores)]
if flatten is False:
if len(Pn) == 0:
Pn = [sp.array([], dtype=sp.int64) for i in pores]
else:
mask = sp.zeros(shape=sp.amax((Pn.max(), pores.max()))+1,
dtype=bool)
mask[Pn] = True
temp = []
for item in Ps_within_r:
temp.append(sp.array(item, dtype=sp.int64)[mask[item]])
Pn = temp
return Pn
|
r"""
Find all pores within a given radial distance of the input pore(s)
regardless of whether or not they are topologically connected.
Parameters
----------
pores : array_like
The list of pores for whom nearby neighbors are to be found
r : scalar
The maximum radius within which the search should be performed
include_input : bool
Controls whether the input pores should be included in the returned
list. The default is ``False``.
flatten : bool
If true returns a single list of all pores that match the criteria,
otherwise returns an array containing a sub-array for each input
pore, where each sub-array contains the pores that are nearby to
each given input pore. The default is False.
Returns
-------
A list of pores which are within the given spatial distance. If a
list of N pores is supplied, then an N-long list of such lists is
returned. The returned lists each contain the pore for which the
neighbors were sought.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 3, 3])
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1)
>>> print(Ps)
[array([3, 9]), array([ 2, 4, 10])]
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=0.5)
>>> print(Ps)
[array([], dtype=int64), array([], dtype=int64)]
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1, flatten=True)
>>> print(Ps)
[ 2 3 4 9 10]
|
27,434 |
def parse_roadmap_gwas(fn):
df = pd.read_table(fn, low_memory=False,
names=[, , , , ])
df = df[df.pvalue < 1e-5]
df = df.sort(columns=[, , ])
df = df.drop_duplicates(subset=[, ])
df = df[df[] != ]
df.index = df[].astype(str) + + df[].astype(str)
return df
|
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
|
27,435 |
def is_up_url(url, allow_redirects=False, timeout=5):
r
    if not isinstance(url, basestring) or '.' not in url:
return False
normalized_url = prepend_http(url)
session = requests.Session()
session.mount(url, HTTPAdapter(max_retries=2))
try:
resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout)
except ConnectionError:
return None
except:
return None
    if resp.status_code in (301, 302, 307) or resp.headers.get('location', None):
        return resp.headers.get('location', None)
elif 100 <= resp.status_code < 400:
return normalized_url
else:
return False
|
r""" Check URL to see if it is a valid web page, return the redirected location if it is
Returns:
None if ConnectionError
False if url is invalid (any HTTP error code)
cleaned up URL (following redirects and possibly adding HTTP schema "http://")
>>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine
'https://duckduckgo.com/'
>>> urlisup = is_up_url("totalgood.org")
>>> not urlisup or str(urlisup).startswith('http')
True
>>> urlisup = is_up_url("wikipedia.org")
>>> str(urlisup).startswith('http')
True
>>> 'wikipedia.org' in str(urlisup)
True
>>> bool(is_up_url('8158989668202919656'))
False
>>> is_up_url('invalidurlwithoutadomain')
False
|
27,436 |
def identifier_director(**kwargs):
ark = kwargs.get(, None)
domain_name = kwargs.get(, None)
scheme = kwargs.get() or
qualifier = kwargs.get(, None)
content = kwargs.get(, )
if ark and qualifier == :
content = % ark
if domain_name and ark and qualifier == :
if not domain_name.endswith():
domain_name +=
permalink_url = % (scheme, domain_name, ark)
if not permalink_url.endswith():
permalink_url +=
content = permalink_url
else:
if qualifier:
content = % (string.lower(qualifier), content)
return DCIdentifier(content=content)
|
Direct how to handle the identifier element.
|
27,437 |
def _flush_aggregated_objects(self):
if len(self.aggregated_objects) == 0:
return 0
number_of_aggregated_objects = len(self.aggregated_objects)
self.logger.info(.format(number_of_aggregated_objects))
for key in self.aggregated_objects:
document = self.aggregated_objects[key]
mongo_pk = self._mongo_sink_key(*key)
self.ds.update(self.sink, mongo_pk, document)
self.logger.info()
del self.aggregated_objects
self.aggregated_objects = dict()
gc.collect()
return number_of_aggregated_objects
|
method inserts aggregated objects into MongoDB
:return number_of_aggregated_objects
|
27,438 |
def create_add_on(self, add_on):
url = urljoin(self._url, )
return add_on.post(url)
|
Make the given `AddOn` available to subscribers on this plan.
|
27,439 |
def IsFile(path):
s doc for performance issues information
ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(url.scheme)
|
:param unicode path:
Path to a file (local or ftp)
:raises NotImplementedProtocol:
If checking for a non-local, non-ftp file
:rtype: bool
:returns:
True if the file exists
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
|
27,440 |
def get_file_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None, **kwargs):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if provider_name is not None:
route_values[] = self._serialize.url(, provider_name, )
query_parameters = {}
if service_endpoint_id is not None:
query_parameters[] = self._serialize.query(, service_endpoint_id, )
if repository is not None:
query_parameters[] = self._serialize.query(, repository, )
if commit_or_branch is not None:
query_parameters[] = self._serialize.query(, commit_or_branch, )
if path is not None:
query_parameters[] = self._serialize.query(, path, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters,
accept_media_type=)
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
|
GetFileContents.
[Preview API] Gets the contents of a file in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path to the file to retrieve, relative to the root of the repository.
:rtype: object
|
27,441 |
def _process_results():
async = get_current_async()
callbacks = async.get_callbacks()
if not isinstance(async.result.payload, AsyncException):
        callback = callbacks.get('success')
    else:
        callback = callbacks.get('error')
if not callback:
raise async.result.payload.exception, None, \
async.result.payload.traceback[2]
return _execute_callback(async, callback)
|
Process the results from an Async job.
|
27,442 |
def timesince(self, now=None):
return djtimesince(self.timestamp, now).encode().replace(b, b).decode()
|
Shortcut for the ``django.utils.timesince.timesince`` function of the
current timestamp.
|
27,443 |
def matching_fpaths(dpath_list, include_patterns, exclude_dirs=[],
greater_exclude_dirs=[], exclude_patterns=[],
recursive=True):
r
if isinstance(dpath_list, six.string_types):
dpath_list = [dpath_list]
for dpath in dpath_list:
for root, dname_list, fname_list in os.walk(dpath):
subdirs = pathsplit_full(relpath(root, dpath))
if any([dir_ in greater_exclude_dirs for dir_ in subdirs]):
continue
if basename(root) in exclude_dirs:
continue
_match = fnmatch.fnmatch
for name in fname_list:
if any(_match(name, pat) for pat in include_patterns):
if not any(_match(name, pat) for pat in exclude_patterns):
fpath = join(root, name)
yield fpath
if not recursive:
break
|
r"""
walks dpath lists returning all directories that match the requested
pattern.
Args:
dpath_list (list):
include_patterns (str):
exclude_dirs (None):
recursive (bool):
References:
# TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath_list = [dirname(dirname(ut.__file__))]
>>> include_patterns = get_standard_include_patterns()
>>> exclude_dirs = ['_page']
>>> greater_exclude_dirs = get_standard_exclude_dnames()
>>> recursive = True
>>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
>>> greater_exclude_dirs, recursive)
>>> result = list(fpath_gen)
>>> print('\n'.join(result))
|
27,444 |
def resplit(prev, pattern, *args, **kw):
    maxsplit = 0 if 'maxsplit' not in kw else kw.pop('maxsplit')
pattern_obj = re.compile(pattern, *args, **kw)
for s in prev:
yield pattern_obj.split(s, maxsplit=maxsplit)
|
The resplit pipe split previous pipe input by regular expression.
Use 'maxsplit' keyword argument to limit the number of split.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern which used to split string.
:type pattern: str|unicode
|
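What each yielded item looks like can be previewed with plain re.split, which resplit applies to every upstream item (strings here are illustrative):

    import re

    pattern = re.compile(r",\s*")
    print(pattern.split("a, b, c"))              # ['a', 'b', 'c']
    print(pattern.split("a, b, c", maxsplit=1))  # ['a', 'b, c']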
27,445 |
def send_request(self, request):
request_id = next(self._id_counter)
message = self._protocol.request_message(request, request_id)
return message, self._event(request, request_id)
|
Send a Request. Return a (message, event) pair.
The message is an unframed message to send over the network.
Wait on the event for the response; which will be in the
"result" attribute.
Raises: ProtocolError if the request violates the protocol
in some way..
|
27,446 |
def vars(self):
if self._vars is None:
self._vars = NNTreeNodeVars(self)
return self._vars
|
Alternative naming, you can use `node.vars.name` instead of `node.v_name`
|
27,447 |
def _dt_to_epoch_ns(dt_series):
index = pd.to_datetime(dt_series.values)
    if index.tzinfo is None:
        index = index.tz_localize('UTC')
    else:
        index = index.tz_convert('UTC')
return index.view(np.int64)
|
Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
|
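A quick check of the conversion, assuming pandas and numpy are available as in the surrounding module:

    import pandas as pd

    s = pd.Series(pd.to_datetime(["1970-01-01 00:00:00", "1970-01-01 00:00:01"]))
    print(_dt_to_epoch_ns(s))  # values: [0, 1000000000] nanoseconds since the epoch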
27,448 |
def to_one(dest_class, type=RelationType.DIRECT, resource_classes=None,
reverse=None, reverse_type=RelationType.DIRECT,
writable=False):
def method_builder(cls):
dest_resource_type = dest_class._resource_type()
dest_method_name = dest_resource_type.replace(, )
doc_variables = {
: cls.__name__,
: dest_class.__name__,
: dest_method_name
}
fetch_method_doc = .format(cls.__name__, dest_class.__name__, dest_method_name)
def _fetch_relationship_included(self):
session = self._session
include = self._include
if include is None or dest_class not in include:
error = "{} was not included".format(dest_class.__name__)
raise AttributeError(error)
included = self._included.get(dest_resource_type)
if len(included) == 0:
return None
mk_one = dest_class._mk_one(session,
resource_classes=resource_classes)
return mk_one({
: included[0]
})
def fetch_relationship_direct(self, use_included=False):
if use_included:
return _fetch_relationship_included(self)
session = self._session
id = None if self.is_singleton() else self.id
url = session._build_url(self._resource_path(), id,
dest_resource_type)
process = dest_class._mk_one(session,
resource_classes=resource_classes)
return session.get(url, CB.json(200, process))
def fetch_relationship_include(self, use_included=False):
if use_included:
return _fetch_relationship_included(self)
session = self._session
id = None if self.is_singleton() else self.id
url = session._build_url(self._resource_path(), id)
params = build_request_include([dest_class], None)
def _process(json):
included = json.get()
if len(included) == 0:
return None
mk_one = dest_class._mk_one(session,
resource_classes=resource_classes)
return mk_one({
: included[0]
})
return session.get(url, CB.json(200, _process),
params=params)
if type == RelationType.DIRECT:
fetch_relationship = fetch_relationship_direct
elif type == RelationType.INCLUDE:
fetch_relationship = fetch_relationship_include
else:
raise ValueError("Invalid RelationType: {}".format(type))
fetch_relationship.__doc__ = fetch_method_doc
def update_method(self, resource):
session, url, json = _build_relatonship(self, dest_resource_type,
resource)
return session.patch(url, CB.boolean(200), json=json)
methods = [(dest_method_name, fetch_relationship)]
if writable:
methods.extend([
(.format(dest_method_name), update_method)
])
for name, method in methods:
method.__doc__ = method.__doc__.format(**doc_variables)
setattr(cls, name, method)
if reverse is not None:
reverse(cls, type=reverse_type)(dest_class)
return cls
return method_builder
|
Create a one to one relation to a given target :class:`Resource`.
Args:
dest_class(Resource): The *target* class for the relationship
Keyword Args:
type(RelationType): The relationship approach to use.
reverse(to_may or to_one): An *optional* reverse relationship.
reverse_type(RelationType): The reverse relationship approach.
resource_classes(Resource): The kinds of Resources to expect
in the relationship
Returns:
A builder function which, given a source class creates a
one-to-one relationship with the target
A one to one relationship means that you can get the associated
target object from the object on which the ``to_one`` was declared.
.. code-block:: python
@to_one(Organization)
def User(Resource):
pass
Declares that a User is associated with *one* Organization. The
decorator automatically adds a method to fetch the associated
organization:
.. code-block:: python
org = user.organization()
|
27,449 |
def _comparator_approximate_star(filter_value, tested_value):
lower_filter_value = filter_value.lower()
if is_string(tested_value):
return _comparator_star(lower_filter_value, tested_value.lower())
elif hasattr(tested_value, "__iter__"):
new_tested = [
value.lower() for value in tested_value if is_string(value)
]
if _comparator_star(lower_filter_value, new_tested):
return True
return _comparator_star(filter_value, tested_value) or _comparator_star(
lower_filter_value, tested_value
)
|
Tests if the filter value, which contains a joker, is nearly equal to the
tested value.
If the tested value is a string or an array of string, it compares their
lower case forms
|
27,450 |
def compile_patterns_in_dictionary(dictionary):
for key, value in dictionary.items():
if isinstance(value, str):
dictionary[key] = re.compile(value)
elif isinstance(value, dict):
compile_patterns_in_dictionary(value)
return dictionary
|
Replace all strings in dictionary with compiled
version of themselves and return dictionary.
|
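For example, nested string values are replaced in place by compiled pattern objects:

    rules = {"email": r"[^@]+@[^@]+", "nested": {"digits": r"\d+"}}
    compile_patterns_in_dictionary(rules)
    print(rules["nested"]["digits"].match("123"))  # <re.Match object; span=(0, 3), match='123'>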
27,451 |
def setMaxSpeedLat(self, typeID, speed):
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MAXSPEED_LAT, typeID, speed)
|
setMaxSpeedLat(string, double) -> None
Sets the maximum lateral speed of this type.
|
27,452 |
def set_advanced_configs(vm_name, datacenter, advanced_configs,
service_instance=None):
current_config = get_vm_config(vm_name,
datacenter=datacenter,
objects=True,
service_instance=service_instance)
diffs = compare_vm_configs({: vm_name,
: advanced_configs},
current_config)
datacenter_ref = salt.utils.vmware.get_datacenter(service_instance,
datacenter)
vm_ref = salt.utils.vmware.get_mor_by_property(service_instance,
vim.VirtualMachine,
vm_name,
property_name=,
container_ref=datacenter_ref)
config_spec = vim.vm.ConfigSpec()
changes = diffs[].diffs
_apply_advanced_config(config_spec, diffs[].new_values,
vm_ref.config.extraConfig)
if changes:
salt.utils.vmware.update_vm(vm_ref, config_spec)
return {: changes}
|
Appends extra config parameters to a virtual machine advanced config list
vm_name
Virtual machine name
datacenter
Datacenter name where the virtual machine is available
advanced_configs
Dictionary with advanced parameter key value pairs
service_instance
vCenter service instance for connection and configuration
|
27,453 |
def shuffled_batches(self, batch_size):
if batch_size >= self.size:
yield self
else:
batch_splits = math_util.divide_ceiling(self.size, batch_size)
indices = list(range(self.size))
np.random.shuffle(indices)
for sub_indices in np.array_split(indices, batch_splits):
yield Transitions(
size=len(sub_indices),
environment_information=None,
transition_tensors={k: v[sub_indices] for k, v in self.transition_tensors.items()}
)
|
Generate randomized batches of data
|
27,454 |
def load_image(file) -> DataAndMetadata.DataAndMetadata:
if isinstance(file, str) or isinstance(file, str):
with open(file, "rb") as f:
return load_image(f)
dmtag = parse_dm3.parse_dm_header(file)
dmtag = fix_strings(dmtag)
img_index = -1
image_tags = dmtag[][img_index]
data = imagedatadict_to_ndarray(image_tags[])
calibrations = []
calibration_tags = image_tags[].get(, dict())
for dimension in calibration_tags.get(, list()):
origin, scale, units = dimension.get(, 0.0), dimension.get(, 1.0), dimension.get(, str())
calibrations.append((-origin * scale, scale, units))
calibrations = tuple(reversed(calibrations))
if len(data.shape) == 3 and data.dtype != numpy.uint8:
if image_tags[].get(, dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
if data.shape[1] == 1:
data = numpy.squeeze(data, 1)
data = numpy.moveaxis(data, 0, 1)
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
calibrations = (calibrations[2], calibrations[0])
else:
data = numpy.moveaxis(data, 0, 2)
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
calibrations = tuple(calibrations[1:]) + (calibrations[0],)
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
elif len(data.shape) == 4 and data.dtype != numpy.uint8:
data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
elif data.dtype == numpy.uint8:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1]))
else:
data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape))
brightness = calibration_tags.get(, dict())
origin, scale, units = brightness.get(, 0.0), brightness.get(, 1.0), brightness.get(, str())
intensity = -origin * scale, scale, units
timestamp = None
timezone = None
timezone_offset = None
title = image_tags.get()
properties = dict()
if in image_tags:
voltage = image_tags[].get(, dict()).get(, dict())
if voltage:
properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) }
dm_metadata_signal = image_tags[].get(, dict()).get()
if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal
if image_tags[].get(, dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
data_descriptor.datum_dimension_count = 1
if image_tags[].get(, dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0:
data_descriptor.is_sequence = True
data_descriptor.collection_dimension_count -= 1
timestamp_str = image_tags[].get("Timestamp")
if timestamp_str:
timestamp = get_datetime_from_timestamp_str(timestamp_str)
timezone = image_tags[].get("Timezone")
timezone_offset = image_tags[].get("TimezoneOffset")
image_tags[].pop("Timestamp", None)
image_tags[].pop("Timezone", None)
image_tags[].pop("TimezoneOffset", None)
properties.update(image_tags[])
dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations]
while len(dimensional_calibrations) < data_descriptor.expected_dimension_count:
dimensional_calibrations.append(Calibration.Calibration())
intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2])
return DataAndMetadata.new_data_and_metadata(data,
data_descriptor=data_descriptor,
dimensional_calibrations=dimensional_calibrations,
intensity_calibration=intensity_calibration,
metadata=properties,
timestamp=timestamp,
timezone=timezone,
timezone_offset=timezone_offset)
|
Loads the image from the file-like object or string file.
If file is a string, the file is opened and then read.
Returns a numpy ndarray of our best guess for the most important image
in the file.
|
27,455 |
def get_user_group(self, user=None, group=None):
user = user or os.getuid()
try:
try:
user = pwd.getpwuid(int(user))
except ValueError:
user = pwd.getpwnam(user)
except KeyError as ex:
self.logger.fatal("could not resolve user: %s", ex)
raise
group = group or user.pw_gid
try:
try:
group = grp.getgrgid(int(group))
except ValueError:
group = grp.getgrnam(group)
except KeyError as ex:
self.logger.fatal("could not resolve group:%s", ex)
raise
return user, group
|
Get the user and group information.
Parameters
----------
user : str
User name or user id (default is the `os.getuid()`).
group : str
Group name or group id (default is the group of `user`).
Returns
-------
user : pwd.struct_passwd
User object.
group : grp.struct_group
Group object.
|
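The underlying lookups come from the standard pwd and grp modules (POSIX only); outside the class they look like:

    import grp
    import os
    import pwd

    user = pwd.getpwuid(os.getuid())   # pwd.struct_passwd for the current user
    group = grp.getgrgid(user.pw_gid)  # grp.struct_group for that user's primary group
    print(user.pw_name, group.gr_name)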
27,456 |
def get_single(self, key, lang=None):
if not isinstance(key, URIRef):
key = URIRef(key)
if lang is not None:
default = None
for o in self.graph.objects(self.asNode(), key):
default = o
if o.language == lang:
return o
return default
else:
for o in self.graph.objects(self.asNode(), key):
return o
|
Returns a single triple related to this node.
:param key: Predicate of the triple
:param lang: Language of the triple if applicable
:rtype: Literal or BNode or URIRef
|
27,457 |
def auto_find_instance_path(self) -> Path:
prefix, package_path = find_package(self.import_name)
if prefix is None:
return package_path / "instance"
return prefix / "var" / f"{self.name}-instance"
|
Locates the instance_path if it was not provided
|
27,458 |
def _find_volumes(self, volume_system, vstype=):
try:
import pytsk3
except ImportError:
logger.error("pytsk3 not installed, could not detect volumes")
raise ModuleNotFoundError("pytsk3")
baseimage = None
try:
raw_path = volume_system.parent.get_raw_path()
try:
baseimage = pytsk3.Img_Info(raw_path)
except Exception:
logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
return []
try:
volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, + vstype.upper()),
volume_system.parent.offset // volume_system.disk.block_size)
volume_system.volume_source =
return volumes
except Exception as e:
if "(GPT or DOS at 0)" in str(e) and vstype != :
volume_system.vstype =
try:
logger.warning("Error in retrieving volume info: TSK couldnTSK_VS_TYPE_GPTmulti'
return volumes
except Exception as e:
logger.exception("Failed retrieving image info (possible empty image).")
raise SubsystemError(e)
else:
logger.exception("Failed retrieving image info (possible empty image).")
raise SubsystemError(e)
finally:
if baseimage:
baseimage.close()
del baseimage
|
Finds all volumes based on the pytsk3 library.
|
27,459 |
def sort_top_level_items(self, key):
self.save_expanded_state()
items = sorted([self.takeTopLevelItem(0)
for index in range(self.topLevelItemCount())], key=key)
for index, item in enumerate(items):
self.insertTopLevelItem(index, item)
self.restore_expanded_state()
|
Sorting tree wrt top level items
|
27,460 |
def _validate_argument(self, arg):
if arg is None:
return arg
if isinstance(arg, type):
return InstanceOf(arg)
if not isinstance(arg, BaseMatcher):
raise TypeError(
"argument of %s can be a type or a matcher (got %r)" % (
self.__class__.__name__, type(arg)))
return arg
|
Validate a type or matcher argument to the constructor.
|
27,461 |
def src_to_dst(self, src_uri):
m = re.match(self.src_uri + "/(.*)$", src_uri)
if (m is None):
return(None)
rel_path = m.group(1)
    return(self.dst_path + '/' + rel_path)
|
Return the dst filepath from the src URI.
Returns None on failure, destination path on success.
|
27,462 |
async def asgi_send(self, message: dict) -> None:
if message["type"] == "websocket.accept" and self.state == ASGIWebsocketState.HANDSHAKE:
headers = build_and_validate_headers(message.get("headers", []))
raise_if_subprotocol_present(headers)
headers.extend(self.response_headers())
await self.asend(
AcceptConnection(
extensions=[PerMessageDeflate()],
extra_headers=headers,
subprotocol=message.get("subprotocol"),
)
)
self.state = ASGIWebsocketState.CONNECTED
self.config.access_logger.access(
self.scope, {"status": 101, "headers": []}, time() - self.start_time
)
elif (
message["type"] == "websocket.http.response.start"
and self.state == ASGIWebsocketState.HANDSHAKE
):
self.response = message
self.config.access_logger.access(self.scope, self.response, time() - self.start_time)
elif message["type"] == "websocket.http.response.body" and self.state in {
ASGIWebsocketState.HANDSHAKE,
ASGIWebsocketState.RESPONSE,
}:
await self._asgi_send_rejection(message)
elif message["type"] == "websocket.send" and self.state == ASGIWebsocketState.CONNECTED:
data: Union[bytes, str]
if message.get("bytes") is not None:
await self.asend(BytesMessage(data=bytes(message["bytes"])))
elif not isinstance(message["text"], str):
            raise TypeError(f"{message['text']} should be a str")
else:
await self.asend(TextMessage(data=message["text"]))
elif message["type"] == "websocket.close" and self.state == ASGIWebsocketState.HANDSHAKE:
await self.send_http_error(403)
self.state = ASGIWebsocketState.HTTPCLOSED
elif message["type"] == "websocket.close":
await self.asend(CloseConnection(code=int(message["code"])))
self.state = ASGIWebsocketState.CLOSED
else:
raise UnexpectedMessage(self.state, message["type"])
|
Called by the ASGI instance to send a message.
|
27,463 |
def _real_re_compile(self, *args, **kwargs):
try:
return re.compile(*args, **kwargs)
except re.error as e:
        raise ValueError('Invalid regular expression ' + args[0] + ': ' + str(e))
|
Thunk over to the original re.compile
|
27,464 |
def roll_dice(spec):
if spec[0] == : return spec
if spec[0] == :
r = spec[1:]
if len(r) == 2: return (, perform_roll(r[0], r[1]))
k = r[3] if r[2] == else -1
d = r[3] if r[2] == else -1
return (, perform_roll(r[0], r[1], k, d))
if spec[0] == "x":
c = None
roll = None
if spec[1][0] == "c": c = spec[1]
elif spec[1][0] == "r": roll = spec[1]
if spec[2][0] == "c": c = spec[2]
elif spec[2][0] == "r": roll = spec[2]
if (c == None or roll == None):
return (, roll_dice(spec[1]), roll_dice(spec[2]))
else:
if (c[1] > 50):
raise SillyDiceError("I don't have that many dice!")
return ("x", [roll_dice(roll) for i in range(c[1])])
if spec[0] in ops:
return (spec[0], roll_dice(spec[1]), roll_dice(spec[2]))
else: raise ValueError("Invalid dice specification")
|
Perform the dice rolls and replace all roll expressions with lists of
the dice faces that landed up.
|
27,465 |
def SavePrivateKey(self, private_key):
self.private_key = private_key
config.CONFIG.Set("Client.private_key",
self.private_key.SerializeToString())
config.CONFIG.Write()
|
Store the new private key on disk.
|
27,466 |
def rotateAboutVectorMatrix(vec, theta_deg):
ct = np.cos(np.radians(theta_deg))
st = np.sin(np.radians(theta_deg))
vec /= np.linalg.norm(vec)
assert( np.all( np.isfinite(vec)))
term1 = ct * np.eye(3)
ucross = np.zeros( (3,3))
ucross[0] = [0, -vec[2], vec[1]]
ucross[1] = [vec[2], 0, -vec[0]]
ucross[2] = [-vec[1], vec[0], 0]
term2 = st*ucross
ufunny = np.zeros( (3,3))
for i in range(0,3):
for j in range(i,3):
ufunny[i,j] = vec[i]*vec[j]
ufunny[j,i] = ufunny[i,j]
term3 = (1-ct) * ufunny
return term1 + term2 + term3
|
Construct the matrix that rotates vector a about
vector vec by an angle of theta_deg degrees
Taken from
http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply
|
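A minimal usage sketch for rotateAboutVectorMatrix above, assuming numpy is available and the function is in scope: rotating the x axis by 90 degrees about z should give approximately the y axis.
import numpy as np

R = rotateAboutVectorMatrix(np.array([0., 0., 1.]), 90.0)   # rotation about +z
v = np.array([1., 0., 0.])
print(R.dot(v))   # ~ [0, 1, 0]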
27,467 |
def get_parser():
    # String literals below are reconstructed from the surviving help-text fragments; the
    # description and positional-argument wording are best-effort approximations.
    parser = argparse.ArgumentParser(
        allow_abbrev=True,
        description='pypyr pipeline runner')
    parser.add_argument('pipeline_name',
                        help='Name of pipeline to run.')
    parser.add_argument(dest='context_args', nargs='*',
                        help="All following args are passed to the pipeline's context_parser function.")
    parser.add_argument('--dir', dest='working_dir',
                        help='Working directory. Use if your pipelines directory is elsewhere. Defaults to cwd.')
    parser.add_argument('--log', '--loglevel', dest='log_level',
                        help='Integer log level. Defaults to 20 (INFO). 10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL.\n Log Level < 10 gives full traceback on errors.')
    parser.add_argument('--logpath', dest='log_path',
                        help='Log-file path. Append log output to this path.')
    parser.add_argument('--version', action='version',
                        help='Echo version number.',
                        version=f'pypyr {pypyr.version.get_version()}')
    return parser
|
Return ArgumentParser for pypyr cli.
|
27,468 |
def get_worker_report(self, with_memory=False):
greenlets = []
for greenlet in list(self.gevent_pool):
g = {}
short_stack = []
stack = traceback.format_stack(greenlet.gr_frame)
for s in stack[1:]:
if "/gevent/hub.py" in s:
break
short_stack.append(s)
g["stack"] = short_stack
job = get_current_job(id(greenlet))
if job:
job.save()
if job.data:
g["path"] = job.data["path"]
g["datestarted"] = job.datestarted
g["id"] = str(job.id)
g["time"] = getattr(greenlet, "_trace_time", 0)
g["switches"] = getattr(greenlet, "_trace_switches", None)
if job._current_io is not None:
g["io"] = job._current_io
greenlets.append(g)
if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
cpu = {
"user": 0,
"system": 0,
"percent": 0
}
mem = {"rss": 0, "swap": 0, "total": 0}
else:
cpu_times = self.process.cpu_times()
cpu = {
"user": cpu_times.user,
"system": cpu_times.system,
"percent": self.process.cpu_percent(0)
}
mem = self.get_memory()
whitelisted_config = [
"max_jobs",
"max_memory"
"greenlets",
"processes",
"queues",
"dequeue_strategy",
"scheduler",
"name",
"local_ip",
"external_ip",
"agent_id",
"worker_group"
]
io = None
if self._traced_io:
io = {}
for k, v in iteritems(self._traced_io):
if k == "total":
io[k] = v
else:
io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
used_pool_slots = len(self.gevent_pool)
used_avg = self.pool_usage_average.next(used_pool_slots)
return {
"status": self.status,
"config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
"done_jobs": self.done_jobs,
"usage_avg": used_avg / self.pool_size,
"datestarted": self.datestarted,
"datereported": datetime.datetime.utcnow(),
"name": self.name,
"io": io,
"_id": str(self.id),
"process": {
"pid": self.process.pid,
"cpu": cpu,
"mem": mem
},
"jobs": greenlets
}
|
Returns a dict containing all the data we can about the current status of the worker and
its jobs.
|
27,469 |
def get_files_zip(run_id: int, filetype: _FileType):
data = current_app.config["data"]
dao_runs = data.get_run_dao()
dao_files = data.get_files_dao()
run = dao_runs.get(run_id)
    if filetype == _FileType.ARTIFACT:
        # run-document keys follow the Sacred layout (reconstructed)
        target_files = run['artifacts']
    elif filetype == _FileType.SOURCE:
        target_files = run['experiment']['sources']
else:
raise Exception("Unknown file type: %s" % filetype)
memory_file = io.BytesIO()
    with zipfile.ZipFile(memory_file, 'w') as zf:
        for f in target_files:
            # artifacts are dicts carrying a file_id; sources are [filename, file_id] pairs (assumed)
            file_id = f['file_id'] if 'file_id' in f else f[1]
file, filename, upload_date = dao_files.get(file_id)
data = zipfile.ZipInfo(filename, date_time=upload_date.timetuple())
data.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(data, file.read())
memory_file.seek(0)
fn_suffix = _filetype_suffices[filetype]
    # the attachment filename pattern below is a best-effort reconstruction
    return send_file(memory_file, attachment_filename='run_{}_{}.zip'.format(run_id, fn_suffix), as_attachment=True)
|
Send all artifacts or sources of a run as ZIP.
|
27,470 |
def point(self, x, y):
shapeType = POINT
pointShape = Shape(shapeType)
pointShape.points.append([x, y])
self.shape(pointShape)
|
Creates a POINT shape.
|
27,471 |
def _options(self):
if self._options_cache is None:
            # the verb and url-type arguments are reconstructed; an HTTP OPTIONS request is implied by the method name
            target_url = self.client.get_url(self._URL_KEY, 'OPTIONS', 'create')
            r = self.client.request('OPTIONS', target_url)
self._options_cache = r.json()
return self._options_cache
|
Returns a raw options object
:rtype: dict
|
27,472 |
def search(self, pattern="*", raw=True, search_raw=True,
output=False):
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
return self._run_sql("WHERE %s GLOB ?" % tosearch, (pattern,),
raw=raw, output=output)
|
Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
|
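A minimal usage sketch for the glob search above, assuming `hist` is an object exposing this search() method; each returned tuple is roughly (session, line_number, input), as described for get_range.
for session, line, source in hist.search("import numpy*"):
    print(session, line, source)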
27,473 |
def merge_text_nodes_on(self, node):
if not isinstance(node, ContainerNode) or not node.children:
return
new_children = []
text_run = []
for i in node.children:
if isinstance(i, Text) and not i.translatable:
text_run.append(i.escaped())
else:
if text_run:
                    new_children.append(EscapedText(''.join(text_run)))
text_run = []
new_children.append(i)
if text_run:
            new_children.append(EscapedText(''.join(text_run)))
node.children = new_children
for i in node.children:
self.merge_text_nodes_on(i)
|
Merges all consecutive non-translatable text nodes into one
|
27,474 |
    def start_tcp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4'):
self._start_client(TCPClient, ip, port, name, timeout, protocol, family)
|
Starts a new TCP client.
Client can be optionally given `ip` and `port` to bind to, as well as
`name`, default `timeout` and a `protocol`. `family` can be either
ipv4 (default) or ipv6.
You should use `Connect` keyword to connect client to a host.
Examples:
| Start TCP client |
| Start TCP client | name=Client1 | protocol=GTPV2 |
| Start TCP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
| Start TCP client | timeout=5 |
| Start TCP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
|
27,475 |
def register(self, source_point_cloud, target_point_cloud,
source_normal_cloud, target_normal_cloud, matcher,
num_iterations=1, compute_total_cost=True, match_centroids=False,
vis=False):
if not isinstance(source_point_cloud, PointCloud) or not isinstance(target_point_cloud, PointCloud):
raise ValueError()
if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(target_normal_cloud, NormalCloud):
raise ValueError()
if not isinstance(matcher, PointToPlaneFeatureMatcher):
raise ValueError()
if source_point_cloud.num_points != source_normal_cloud.num_points or target_point_cloud.num_points != target_normal_cloud.num_points:
raise ValueError()
orig_source_points = source_point_cloud.data.T
orig_target_points = target_point_cloud.data.T
orig_source_normals = source_normal_cloud.data.T
orig_target_normals = target_normal_cloud.data.T
normal_norms = np.linalg.norm(orig_target_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_target_points = orig_target_points[valid_inds[0],:]
orig_target_normals = orig_target_normals[valid_inds[0],:]
normal_norms = np.linalg.norm(orig_source_normals, axis=1)
valid_inds = np.nonzero(normal_norms)
orig_source_points = orig_source_points[valid_inds[0],:]
orig_source_normals = orig_source_normals[valid_inds[0],:]
source_mean_point = np.mean(orig_source_points, axis=0)
target_mean_point = np.mean(orig_target_points, axis=0)
R_sol = np.eye(3)
t_sol = np.zeros([3, 1])
if match_centroids:
t_sol[:,0] = target_mean_point - source_mean_point
for i in range(num_iterations):
            logging.info('Point to plane ICP iteration %d' % (i))
source_subsample_inds = np.random.choice(orig_source_points.shape[0], size=self.sample_size_)
source_points = orig_source_points[source_subsample_inds,:]
source_normals = orig_source_normals[source_subsample_inds,:]
target_subsample_inds = np.random.choice(orig_target_points.shape[0], size=self.sample_size_)
target_points = orig_target_points[target_subsample_inds,:]
target_normals = orig_target_normals[target_subsample_inds,:]
source_points = (R_sol.dot(source_points.T) + np.tile(t_sol, [1, source_points.shape[0]])).T
source_normals = (R_sol.dot(source_normals.T)).T
corrs = matcher.match(source_points, target_points, source_normals, target_normals)
valid_corrs = np.where(corrs.index_map != -1)[0]
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
logging.warning()
break
A = np.zeros([6,6])
b = np.zeros([6,1])
Ap = np.zeros([6,6])
bp = np.zeros([6,1])
G = np.zeros([3,6])
G[:,3:] = np.eye(3)
for i in range(num_corrs):
s = source_corr_points[i:i+1,:].T
t = target_corr_points[i:i+1,:].T
n = target_corr_normals[i:i+1,:].T
G[:,:3] = skew(s).T
A += G.T.dot(n).dot(n.T).dot(G)
b += G.T.dot(n).dot(n.T).dot(t - s)
Ap += G.T.dot(G)
bp += G.T.dot(t - s)
v = np.linalg.solve(A + self.gamma_*Ap + self.mu_*np.eye(6),
b + self.gamma_*bp)
R = np.eye(3)
R = R + skew(v[:3])
U, S, V = np.linalg.svd(R)
R = U.dot(V)
t = v[3:]
R_sol = R.dot(R_sol)
t_sol = R.dot(t_sol) + t
T_source_target = RigidTransform(R_sol, t_sol, from_frame=source_point_cloud.frame, to_frame=target_point_cloud.frame)
total_cost = 0
source_points = (R_sol.dot(orig_source_points.T) + np.tile(t_sol, [1, orig_source_points.shape[0]])).T
source_normals = (R_sol.dot(orig_source_normals.T)).T
if compute_total_cost:
corrs = matcher.match(source_points, orig_target_points, source_normals, orig_target_normals)
valid_corrs = np.where(corrs.index_map != -1)[0]
num_corrs = valid_corrs.shape[0]
if num_corrs == 0:
return RegistrationResult(T_source_target, np.inf)
source_corr_points = corrs.source_points[valid_corrs,:]
target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
source_target_alignment = np.diag((source_corr_points - target_corr_points).dot(target_corr_normals.T))
point_plane_cost = (1.0 / num_corrs) * np.sum(source_target_alignment * source_target_alignment)
point_dist_cost = (1.0 / num_corrs) * np.sum(np.linalg.norm(source_corr_points - target_corr_points, axis=1)**2)
total_cost = point_plane_cost + self.gamma_ * point_dist_cost
return RegistrationResult(T_source_target, total_cost)
|
Iteratively register objects to one another using a modified version of point to plane ICP.
The cost func is PointToPlane_COST + gamma * PointToPoint_COST.
Uses a `stochastic Gauss-Newton step` where on each iteration a smaller number of points is sampled.
Parameters
----------
source_point_cloud : :obj:`autolab_core.PointCloud`
source object points
target_point_cloud : :obj:`autolab_core.PointCloud`
target object points
source_normal_cloud : :obj:`autolab_core.NormalCloud`
source object outward-pointing normals
target_normal_cloud : :obj:`autolab_core.NormalCloud`
target object outward-pointing normals
matcher : :obj:`PointToPlaneFeatureMatcher`
object to match the point sets
num_iterations : int
the number of iterations to run
compute_total_cost : bool
whether or not to compute the total cost upon termination.
match_centroids : bool
whether or not to match the centroids of the point clouds
Returns
-------
:obj:`RegistrationResult`
results containing source to target transformation and cost
|
27,476 |
def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4, force_mavlink1=False):
return self.send(self.sys_status_encode(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4), force_mavlink1=force_mavlink1)
|
The general system state. If the system is following the MAVLink
standard, the system state is mainly defined by three
orthogonal states/modes: The system mode, which is
either LOCKED (motors shut down and locked), MANUAL
(system under RC control), GUIDED (system with
autonomous position control, position setpoint
controlled manually) or AUTO (system guided by
path/waypoint planner). The NAV_MODE defined the
current flight state: LIFTOFF (often an open-loop
maneuver), LANDING, WAYPOINTS or VECTOR. This
represents the internal navigation state machine. The
system status shows whether the system is currently
active or not and if an emergency occurred. During the
CRITICAL and EMERGENCY states the MAV is still
considered to be active, but should start emergency
procedures autonomously. After a failure occurred it
should first move from active to critical to allow
manual intervention and then move to emergency after a
certain timeout.
onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t)
drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_count1 : Autopilot-specific errors (uint16_t)
errors_count2 : Autopilot-specific errors (uint16_t)
errors_count3 : Autopilot-specific errors (uint16_t)
errors_count4 : Autopilot-specific errors (uint16_t)
|
27,477 |
def OnTableChanged(self, event):
        if hasattr(event, 'table'):
self.Select(event.table)
self.EnsureVisible(event.table)
event.Skip()
|
Table changed event handler
|
27,478 |
def _write_model(self, specification, specification_set):
filename = "%s%s.py" % (self._class_prefix.lower(), specification.entity_name.lower())
override_content = self._extract_override_content(specification.entity_name)
constants = self._extract_constants(specification)
superclass_name = "NURESTRootObject" if specification.rest_name == self.api_root else "NURESTObject"
self.write(destination=self.output_directory, filename=filename, template_name="model.py.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
constants=constants,
header=self.header_content)
self.model_filenames[filename] = specification.entity_name
|
Write autogenerated specification file
|
27,479 |
def _get_key_location(self, key) -> (int, int):
key = int(key)
if key == 0:
return 1, 0
remainder = key % self.chunkSize
addend = ChunkedFileStore.firstChunkIndex
chunk_no = key - remainder + addend if remainder \
else key - self.chunkSize + addend
offset = remainder or self.chunkSize
return chunk_no, offset
|
Return chunk no and 1-based offset of key
:param key:
:return:
|
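A worked example of the chunk arithmetic above, assuming chunkSize == 5 and ChunkedFileStore.firstChunkIndex == 1 (both assumptions):
# key 7  -> remainder 2 -> chunk_no = 7 - 2 + 1 = 6,  offset 2
# key 10 -> remainder 0 -> chunk_no = 10 - 5 + 1 = 6, offset 5 (last slot of chunk 6)
# key 0  -> special-cased to (1, 0)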
27,480 |
def upload(self, filepath, service_path, remove=False):
local = OSFS(os.path.dirname(filepath))
if self.fs.hassyspath(service_path) and (
self.fs.getsyspath(service_path) == local.getsyspath(
os.path.basename(filepath))):
if remove:
os.remove(filepath)
return
if not self.fs.isdir(fs.path.dirname(service_path)):
self.fs.makedir(
fs.path.dirname(service_path),
recursive=True,
allow_recreate=True)
if remove:
fs.utils.movefile(
local,
os.path.basename(filepath),
self.fs,
service_path)
else:
fs.utils.copyfile(
local,
os.path.basename(filepath),
self.fs,
service_path)
|
"Upload" a file to a service
This copies a file from the local filesystem into the ``DataService``'s
filesystem. If ``remove==True``, the file is moved rather than copied.
If ``filepath`` and ``service_path`` paths are the same, ``upload``
deletes the file if ``remove==True`` and returns.
Parameters
----------
filepath : str
Relative or absolute path to the file to be uploaded on the user's
filesystem
service_path: str
Path to the destination for the file on the ``DataService``'s
filesystem
remove : bool
If true, the file is moved rather than copied
|
27,481 |
def squeeze(self, trits, offset=0, length=HASH_LENGTH):
if length % HASH_LENGTH != 0:
raise with_context(
exc=ValueError(),
context={
: trits,
: offset,
: length,
})
trits.extend([0] * max(0, length - len(trits)))
if len(trits) - offset < HASH_LENGTH:
raise with_context(
exc=ValueError(),
context={
: trits,
: offset,
: length
},
)
while length >= HASH_LENGTH:
trits[offset:offset + HASH_LENGTH] = self._state[0:HASH_LENGTH]
self._transform()
offset += HASH_LENGTH
length -= HASH_LENGTH
|
Squeeze trits from the sponge.
:param trits:
Sequence that the squeezed trits will be copied to.
Note: this object will be modified!
:param offset:
Starting offset in ``trits``.
:param length:
Number of trits to squeeze, defaults to ``HASH_LENGTH``
|
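A minimal sketch of absorbing and squeezing with a sponge exposing this interface, assuming a PyOTA-style Curl class is in scope and HASH_LENGTH == 243:
sponge = Curl()
sponge.absorb([0] * 243)     # absorb one hash-length block of trits
out = [0] * 243
sponge.squeeze(out)          # out now holds 243 squeezed trits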
27,482 |
def prog(self, s=None):
s = s or self.prog_msg
        # end='' assumed so repeated calls extend the indicator on one line
        self.printer(s, end='')
|
Prints the progress indicator
|
27,483 |
def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
              remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
              remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
              add_chain_id_if_empty='X', keep_chains=None):
    # Default strings above ('_clean', 'A', 'X') are best-effort reconstructions.
    outfile = ssbio.utils.outfile_maker(inname=pdb_file,
                                        append_to_name=out_suffix,
                                        outdir=outdir,
                                        outext='.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
my_pdb = StructureIO(pdb_file)
my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains,
keep_chemicals=keep_chemicals)
my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix,
out_dir=outdir,
custom_selection=my_cleaner,
force_rerun=force_rerun)
return my_clean_pdb
else:
return outfile
|
Clean a PDB file.
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
|
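A hedged usage sketch for clean_pdb above; the input path and chain ID are placeholders, and the exact output filename depends on out_suffix:
clean_path = clean_pdb('my_structure.pdb', outdir='/tmp', keep_chains=['A'], remove_res_hetero=True)
print(clean_path)   # path to the cleaned copy under /tmp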
27,484 |
def _get_min_max_value(min, max, value=None, step=None):
if value is None:
if min is None or max is None:
            raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else:
if not isinstance(value, Real):
            raise TypeError('expected a real number, got: %r' % value)
if value == 0:
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
        raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value
|
Return min, max, value given input values with possible None.
|
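Worked examples of the inference above (the results follow directly from the code):
_get_min_max_value(None, None, value=10)     # -> (-10, 30, 10): range inferred around the value
_get_min_max_value(0, 10)                    # -> (0, 10, 5): value defaults to the midpoint
_get_min_max_value(0, 10, value=7, step=2)   # -> (0, 10, 6): value snapped onto the step grid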
27,485 |
def n_p(self):
return 2*_sltr.GeV2joule(self.E)*_spc.epsilon_0 / (self.beta*_spc.elementary_charge)**2
|
The plasma density in SI units.
|
27,486 |
def metadata_sorter(x, y):
if x == y:
return 0
if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
elif x in METADATA_SORTER_FIRST:
return -1
elif y in METADATA_SORTER_FIRST:
return 1
else:
        # '_' is an assumed low-priority prefix; the original prefix character was lost
        if x.startswith('_') and y.startswith('_'):
            return cmp(x[1:], y[1:])
        elif x.startswith('_'):
            return 1
        elif y.startswith('_'):
            return -1
else:
return cmp(x, y)
|
Sort metadata keys by priority.
|
27,487 |
def _init_base_objects(self, ssl_version: OpenSslVersionEnum, underlying_socket: Optional[socket.socket]) -> None:
self._is_handshake_completed = False
self._ssl_version = ssl_version
self._ssl_ctx = self._NASSL_MODULE.SSL_CTX(ssl_version.value)
self._sock = underlying_socket
|
Setup the socket and SSL_CTX objects.
|
27,488 |
def correspond(text):
subproc.stdin.write(text)
subproc.stdin.flush()
return drain()
|
Communicate with the child process without closing stdin.
|
27,489 |
def rate_limit(limit: int, key=None):
def decorator(func):
        # attribute names follow the aiogram throttling convention (assumed)
        setattr(func, 'throttling_rate_limit', limit)
        if key:
            setattr(func, 'throttling_key', key)
return func
return decorator
|
Decorator for configuring rate limit and key in different functions.
:param limit:
:param key:
:return:
|
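A minimal sketch of applying the decorator, assuming an aiogram-style handler; the handler body is illustrative only:
@rate_limit(5, 'start')
async def cmd_start(message):
    await message.reply("Hello!")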
27,490 |
def name_parts(self):
default = PartialMixin.ANY
return ([(k, default, True)
for k, _, _ in PartitionName._name_parts]
+
[(k, default, True)
for k, _, _ in Name._generated_names]
)
|
Works with PartialNameMixin.clear_dict to set NONE and ANY
values.
|
27,491 |
def compute_integrated_acquisition(acquisition,x):
acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x/acquisition.model.num_hmc_samples
return acqu_x
|
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
|
27,492 |
def shutdown(self):
logging.info()
for func in self.shutdown_cleanup.values():
func()
session_time = round((time.time() - self.start_time)/60, 0)
        logging.info('session time: {} minutes'.format(session_time))
logging.info()
|
Shutdown operations on exit. This is run by
a finally statement after the tkinter mainloop ends;
call root.quit to get here.
Note you will still need to call sys.exit()
|
27,493 |
def dist_dir(self):
if self.distribution is None:
            warning('Tried to access {}.dist_dir, but {}.distribution is None'
                    .format(self, self))
exit(1)
return self.distribution.dist_dir
|
The dist dir at which to place the finished distribution.
|
27,494 |
def cds_identifier_validator(record, result):
    record_external_identifiers = get_value(record, 'external_system_identifiers', [])
    result_external_identifiers = get_value(result, 'external_system_identifiers', [])
    record_external_identifiers = {external_id["value"] for external_id in record_external_identifiers if external_id["schema"] == 'CDS'}
    result_external_identifiers = {external_id["value"] for external_id in result_external_identifiers if external_id["schema"] == 'CDS'}
return bool(record_external_identifiers & result_external_identifiers)
|
Ensure that the two records have the same CDS identifier.
This is needed because the search is done only for
``external_system_identifiers.value``, which might cause false positives in
case the matched record has an identifier with the same ``value`` but
``schema`` different from CDS.
Args:
record (dict): the given record we are trying to match with similar ones in INSPIRE.
result (dict): possible match returned by the ES query that needs to be validated.
Returns:
bool: validation decision.
|
27,495 |
def decr(self, stat, count=1, rate=1):
self.incr(stat, -count, rate)
|
Decrement a stat by `count`.
|
27,496 |
def getSupportedServices(self, only_uids=True):
        # result-range entries carry a 'uid' key and are resolved via the UID catalog (assumed)
        uids = map(lambda range: range['uid'], self.getReferenceResults())
        uids = filter(api.is_uid, uids)
        if only_uids:
            return uids
        brains = api.search({'UID': uids}, 'uid_catalog')
return map(api.get_object, brains)
|
Return a list with the services supported by this reference sample,
those for which there is a valid results range assigned in reference
results
:param only_uids: returns a list of uids or a list of objects
:return: list of uids or AnalysisService objects
|
27,497 |
def create_api_key(self, api_id, stage_name):
        # name/description/print format strings are best-effort reconstructions
        response = self.apigateway_client.create_api_key(
            name='{}_{}'.format(stage_name, api_id),
            description='Api key for api_id={}'.format(api_id),
            enabled=True,
            stageKeys=[
                {
                    'restApiId': '{}'.format(api_id),
                    'stageName': '{}'.format(stage_name)
                },
            ]
        )
        print('Created a new api key: {}'.format(response['id']))
|
Create new API key and link it with an api_id and a stage_name
|
27,498 |
def accept_all(self):
keys = self.list_keys()
for key in keys[self.PEND]:
            try:
                shutil.move(
                    os.path.join(
                        self.opts['pki_dir'],
                        self.PEND,
                        key),
                    os.path.join(
                        self.opts['pki_dir'],
                        self.ACC,
                        key)
                )
                # event payload keys follow the salt-key convention (assumed)
                eload = {'result': True,
                         'act': 'accept',
                         'id': key}
                self.event.fire_event(eload,
                                      salt.utils.event.tagify(prefix='key'))
except (IOError, OSError):
pass
return self.list_keys()
|
Accept all keys in pre
|
27,499 |
def get_default(__func: Callable, __arg: str) -> str:
return signature(__func).parameters[__arg].default
|
Fetch default value for a function argument
Args:
__func: Function to inspect
__arg: Argument to extract default value for
|
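A minimal usage sketch for get_default; the greet function is a hypothetical example:
def greet(name, greeting='hello'):
    return f'{greeting}, {name}'

print(get_default(greet, 'greeting'))   # -> 'hello'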