Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars) |
---|---|---|
376,700 | def import_libs(self, module_names, impl_name):
for name in module_names:
try:
module = __import__(name)
has_module = True
except ImportError:
module = None
has_module = False
setattr(self, name, module)
setattr(self, 'has_%s' % name, has_module)
for name in module_names:
try:
return name, __import__(name)
except ImportError:
pass
raise ImportError('%s requires one of: %s' % (impl_name, ', '.join(module_names))) | Loop through module_names,
add has_... booleans to the class, and
set ..._impl to the first successful import.
:param module_names: list of module names to try importing
:param impl_name: used in error output if no modules succeed
:return: name, module from first successful implementation |
376,701 | def get_default_value(self):
if ('default' in self.attributes and
bool(self.attributes['default'].strip())):
return self.attributes['default']
else:
return None | Return the default value for the parameter. If there is no default value, return None |
376,702 | def refactor(self, symbol, value):
if value:
self.pset.add(symbol)
else:
self.pset.remove(symbol) | Add symbol to the parameter set if value is truthy; otherwise remove it.
Args:
symbol: the symbol to add or remove
value: truthy to add, falsy to remove
Returns:
None |
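A minimal usage sketch, assuming the refactor function above is available at module scope and attached to a small holder class (the original owning class is not shown):

class _PSetHolder:
    def __init__(self):
        self.pset = set()          # the symbol set being maintained
    refactor = refactor            # reuse the function above as a method

h = _PSetHolder()
h.refactor("DEBUG", True)          # truthy value -> symbol is added
h.refactor("DEBUG", False)         # falsy value  -> symbol is removed (KeyError if absent)
assert h.pset == set()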
376,703 | def start(host='localhost', port=61613, username='', password=''):
StompClientFactory.username = username
StompClientFactory.password = password
reactor.connectTCP(host, port, StompClientFactory())
reactor.run() | Start twisted event loop and the fun should begin... |
376,704 | def get_leafs(self, name):
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return [node for node in self._get_subtree(name) if self.is_leaf(node)] | r"""
Get the sub-tree leaf node(s).
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree) |
376,705 | def combine_pdf_as_bytes(pdfs: List[BytesIO]) -> bytes:
writer = PdfWriter()
for pdf in pdfs:
writer.addpages(PdfReader(pdf).pages)
bio = BytesIO()
writer.write(bio)
bio.seek(0)
output = bio.read()
bio.close()
return output | Combine PDFs and return a byte-string with the result.
Arguments
---------
pdfs
A list of BytesIO representations of PDFs |
376,706 | def ratio_value_number_to_time_series_length(self, x):
ratio = feature_calculators.ratio_value_number_to_time_series_length(x)
logging.debug("ratio value number to time series length by tsfresh calculated")
return ratio | As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master\
/tsfresh/feature_extraction/feature_calculators.py#L830>`_
Returns a factor which is 1 if all values in the time series occur only once,
and below one if this is not the case.
In principle, it just returns: # unique values / # values
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:rtype: float |
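The ratio itself is easy to check without tsfresh; a rough standalone sketch of "# unique values / # values", assuming pandas is available:

import pandas as pd

x = pd.Series([1, 1, 2, 3])
ratio = x.nunique() / len(x)   # 3 unique values / 4 values = 0.75
print(ratio)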
376,707 | def requires_public_key(func):
def func_wrapper(self, *args, **kwargs):
if hasattr(self, "public_key"):
func(self, *args, **kwargs)
else:
self.generate_public_key()
func(self, *args, **kwargs)
return func_wrapper | Decorator for functions that require the public key to be defined. By definition, this includes the private key, as such, it's enough to use this to effect definition of both public and private key. |
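A hypothetical sketch of applying such a decorator, assuming requires_public_key above is importable; the key-generation body is a stand-in, not the real implementation:

class KeyHolder:
    def generate_public_key(self):
        # stand-in: a real implementation would create an actual key pair
        self.public_key = "public-key-bytes"
        self.private_key = "private-key-bytes"

    @requires_public_key
    def sign(self, message):
        print("signing %r with %s" % (message, self.private_key))

KeyHolder().sign("hello")   # key pair is generated on first use, then sign() runs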
376,708 | def footprints_from_point(point, distance, footprint_type='building', retain_invalid=False):
bbox = bbox_from_point(point=point, distance=distance)
north, south, east, west = bbox
return create_footprints_gdf(north=north, south=south, east=east, west=west,
footprint_type=footprint_type, retain_invalid=retain_invalid) | Get footprints within some distance north, south, east, and west of
a lat-long point.
Parameters
----------
point : tuple
a lat-long point
distance : numeric
distance in meters
footprint_type : string
type of footprint to be downloaded. OSM tag key e.g. 'building', 'landuse', 'place', etc.
retain_invalid : bool
if False discard any footprints with an invalid geometry
Returns
-------
GeoDataFrame |
376,709 | def _jx_expression(expr, lang):
if is_expression(expr):
new_op = lang[expr.id]
if not new_op:
return language[expr.id].partial_eval()
return expr
if expr is None:
return TRUE
elif expr in (True, False, None) or expr == None or isinstance(expr, (float, int, Decimal, Date)):
return Literal(expr)
elif is_text(expr):
return Variable(expr)
elif is_sequence(expr):
return lang[TupleOp([_jx_expression(e, lang) for e in expr])]
try:
items = items_(expr)
for op, term in items:
full_op = operators.get(op)
if full_op:
class_ = lang.ops[full_op.id]
if class_:
return class_.define(expr)
class_ = language[op.id]
output = class_.define(expr).partial_eval()
return _jx_expression(output, lang)
else:
if not items:
return NULL
raise Log.error("{{instruction|json}} is not known", instruction=items)
except Exception as e:
Log.error("programmer error expr = {{value|quote}}", value=expr, cause=e) | WRAP A JSON EXPRESSION WITH OBJECT REPRESENTATION |
376,710 | def vdp_vlan_change_internal(self, vsw_cb_data, vdp_vlan, fail_reason):
LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan)
if not vsw_cb_data:
LOG.error("NULL vsw_cb_data Info received")
return
net_uuid = vsw_cb_data.get('net_uuid')
port_uuid = vsw_cb_data.get('port_uuid')
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error("Network %s is not in the local vlan map", net_uuid)
return
lldpad_port = self.lldpad_info
if not lldpad_port:
LOG.error("There is no LLDPad port available.")
return
exist_vdp_vlan = lvm.late_binding_vlan
lvid = lvm.vlan
LOG.debug("lvid %(lvid)s exist %(vlan)s",
{'lvid': lvid, 'vlan': exist_vdp_vlan})
lvm.decr_reset_vlan(port_uuid, vdp_vlan)
lvm.set_fail_reason(port_uuid, fail_reason)
self.vdp_vlan_cb(port_uuid, lvid, vdp_vlan, fail_reason)
if vdp_vlan == exist_vdp_vlan:
LOG.debug("No change in provider VLAN %s", vdp_vlan)
return
| Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads. |
376,711 | def _put_file(self, file):
post_params = {
'Content-Length': file.size,
'Content-MD5': file.md5hash(),
'Content-Type': self._get_content_type(file),
}
headers = self._request_headers('PUT', file.prefixed_name, post_params=post_params)
with closing(HTTPConnection(self.netloc)) as conn:
conn.request('PUT', file.prefixed_name, file.read(), headers=headers)
response = conn.getresponse()
if response.status not in (200,):
raise S3IOError(
.format(response.status, response.reason, response.read())) | Send PUT request to S3 with file contents |
376,712 | def reduce(self, colors):
need_reset = False
line = []
for color, items in itertools.groupby(colors):
if color is None:
if need_reset:
line.append("\x1b[49m")
need_reset = False
line.append(self.pad * len(list(items)))
elif color == "EOL":
if need_reset:
line.append("\x1b[49m")
need_reset = False
yield "".join(line)
else:
line.pop()
yield "".join(line)
line = []
else:
need_reset = True
line.append("\x1b[48;5;%dm%s" % (
color, self.pad * len(list(items)))) | Converts color codes into optimized text
This optimizer works by merging adjacent colors so we don't
have to repeat the same escape codes for each pixel. There is
no loss of information.
:param colors: Iterable yielding an xterm color code for each
pixel, None to indicate a transparent pixel, or
``'EOL'`` to indicate the end of a line.
:return: Yields lines of optimized text. |
376,713 | def as_graph(self) -> Digraph:
if Digraph is None:
raise ImportError()
dot = Digraph()
nodes = set()
queue = [self._root]
while queue:
state = queue.pop(0)
if not state.payload:
dot.node(.format(state.id), , {: ( if state else )})
else:
dot.node(.format(state.id), .join(map(str, state.payload)), {: })
for next_state in state.values():
if next_state.id not in nodes:
queue.append(next_state)
nodes.add(state.id)
nodes = set()
queue = [self._root]
while queue:
state = queue.pop(0)
if state.id in nodes:
continue
nodes.add(state.id)
for (label, other) in state.items():
dot.edge(.format(state.id), .format(other.id), _term_str(label))
if other.id not in nodes:
queue.append(other)
return dot | Renders the discrimination net as graphviz digraph. |
376,714 | def get_adjacency_matrix(self, fmt='coo'):
if fmt in self._am.keys():
am = self._am[fmt]
elif self._am.keys():
am = self._am[list(self._am.keys())[0]]
tofmt = getattr(am, 'to' + fmt)
am = tofmt()
self._am[fmt] = am
else:
am = self.create_adjacency_matrix(weights=self.Ts, fmt=fmt)
self._am[fmt] = am
return am | r"""
Returns an adjacency matrix in the specified sparse format, with 1's
indicating the non-zero values.
Parameters
----------
fmt : string, optional
The sparse storage format to return. Options are:
**'coo'** : (default) This is the native format of OpenPNM data
**'lil'** : Enables row-wise slice of the matrix
**'csr'** : Favored by most linear algebra routines
**'dok'** : Enables subscript access of locations
Notes
-----
This method will only create the requested matrix in the specified
format if one is not already saved on the object. If not present,
this method will create and return the matrix, as well as store it
for future use.
To obtain a matrix with weights other than ones at each non-zero
location use ``create_adjacency_matrix``. |
376,715 | def list_variables(self):
station_codes = self._get_station_codes()
station_codes = self._apply_features_filter(station_codes)
variables = self._list_variables(station_codes)
if hasattr(self, "_variables") and self.variables is not None:
variables.intersection_update(set(self.variables))
return list(variables) | List available variables and applies any filters. |
376,716 | def create_extended_model(model, db_penalty=None, ex_penalty=None,
tp_penalty=None, penalties=None):
model_extended = model.create_metabolic_model()
extra_compartment = model.extracellular_compartment
compartment_ids = set(c.id for c in model.compartments)
if len(compartment_ids) > 0:
logger.info(
.format(
.join(.format(c) for c in compartment_ids)))
db_added = add_all_database_reactions(model_extended, compartment_ids)
else:
logger.warning(
)
db_added = set()
logger.info(
.format(
extra_compartment))
ex_added = add_all_exchange_reactions(
model_extended, extra_compartment, allow_duplicates=True)
boundaries = model.compartment_boundaries
if len(boundaries) > 0:
logger.info(
.format(
.join(.format(c1, c2) for c1, c2 in boundaries)))
tp_added = add_all_transport_reactions(
model_extended, boundaries, allow_duplicates=True)
else:
logger.warning(
)
tp_added = set()
weights = {}
if db_penalty is not None:
weights.update((rxnid, db_penalty) for rxnid in db_added)
if tp_penalty is not None:
weights.update((rxnid, tp_penalty) for rxnid in tp_added)
if ex_penalty is not None:
weights.update((rxnid, ex_penalty) for rxnid in ex_added)
if penalties is not None:
for rxnid, penalty in iteritems(penalties):
weights[rxnid] = penalty
return model_extended, weights | Create an extended model for gap-filling.
Create a :class:`psamm.metabolicmodel.MetabolicModel` with
all reactions added (the reaction database in the model is taken
to be the universal database) and also with artificial exchange
and transport reactions added. Return the extended
:class:`psamm.metabolicmodel.MetabolicModel`
and a weight dictionary for added reactions in that model.
Args:
model: :class:`psamm.datasource.native.NativeModel`.
db_penalty: penalty score for database reactions, default is `None`.
ex_penalty: penalty score for exchange reactions, default is `None`.
tp_penalty: penalty score for transport reactions, default is `None`.
penalties: a dictionary of penalty scores for database reactions. |
376,717 | def sort_by_speedup(self, reverse=True):
self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
return self | Sort the configurations in place; items with the highest speedup come first. |
376,718 | def _getHead(self, branch):
d = utils.getProcessOutput(self.hgbin,
[, , branch,
+ os.linesep],
path=self._absWorkdir(), env=os.environ, errortoo=False)
@d.addErrback
def no_head_err(exc):
log.err("hgpoller: could not find revision %r in repository %r" % (
branch, self.repourl))
@d.addCallback
def results(heads):
if not heads:
return
if len(heads.split()) > 1:
log.err(("hgpoller: caught several heads in branch %r "
"from repository %r. Staying at previous revision"
"You should wait until the situation is normal again "
"due to a merge or directly strip if remote repo "
"gets stripped later.") % (branch, self.repourl))
return
return heads.strip().decode(self.encoding)
return d | Return a deferred for branch head revision or None.
We'll get an error if there is no head for this branch, which is
probably a good thing, since it's probably a misspelling
(if really buildbotting a branch that does not have any changeset
yet, one shouldn't be surprised to get errors) |
376,719 | def _run_events(self, tag, stage=None):
self._run_event_methods(tag, stage)
self._run_tests(tag, stage) | Run tests marked with a particular tag and stage |
376,720 | def search(self, query_string, **kwargs):
self.index_name = self._index_name_for_language(translation.get_language())
return super(ElasticsearchMultilingualSearchBackend, self).search(query_string, **kwargs) | The main search method
:param query_string: The string to pass to Elasticsearch. e.g. '*:*'
:param kwargs: start_offset, end_offset, result_class
:return: result_class instance |
376,721 | def weeks_per_year(year):
jan1 = jwday(gregorian.to_jd(year, 1, 1))
if jan1 == THU or (jan1 == WED and isleap(year)):
return 53
else:
return 52 | Number of ISO weeks in a year |
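The same count can be cross-checked with the standard library, since 28 December always falls in the last ISO week of its year; a sketch:

import datetime

def iso_weeks(year):
    # 28 December is always in the last ISO week of the year
    return datetime.date(year, 12, 28).isocalendar()[1]

print(iso_weeks(2015), iso_weeks(2016))   # 53 52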
376,722 | def _process_op_err(self, e):
if self.is_connecting or self.is_closed or self.is_reconnecting:
return
if self.options["allow_reconnect"] and self.is_connected:
self._status = Client.RECONNECTING
self._ps.reset()
if self._reconnection_task is not None and not self._reconnection_task.cancelled():
self._reconnection_task.cancel()
self._reconnection_task = self._loop.create_task(self._attempt_reconnect())
else:
self._process_disconnect()
self._err = e
yield from self._close(Client.CLOSED, True) | Process errors which occurred while reading or parsing
the protocol. If allow_reconnect is enabled it will
try to switch the server to which it is currently connected
otherwise it will disconnect. |
376,723 | def RawData(self):
result = collections.OrderedDict()
i = 0
while True:
try:
name, value, value_type = winreg.EnumValue(self._AccessRootKey(), i)
if value_type == winreg.REG_SZ:
precondition.AssertType(value, Text)
result[name] = value
except OSError:
break
i += 1
return result | Returns the values in each section. |
376,724 | def top_k_logits(logits, k):
if k == 0:
return logits
else:
values = torch.topk(logits, k)[0]
batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits) | Masks everything but the top k entries as -infinity (-1e10).
Used to mask logits such that e^-infinity -> 0 won't contribute to the
sum of the denominator. |
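A small usage sketch, assuming PyTorch is installed and top_k_logits above is in scope:

import torch

logits = torch.tensor([[1.0, 5.0, 3.0, 2.0]])
masked = top_k_logits(logits, k=2)
# everything outside the top 2 becomes -1e10, so softmax drives it to ~0
print(torch.softmax(masked, dim=-1))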
376,725 | def _get_notify_msg_and_payload(result, stream):
token = stream.advance_past_chars(["=", "*"])
token = int(token) if token != "" else None
logger.debug("%s", fmt_green("parsing message"))
message = stream.advance_past_chars([","])
logger.debug("parsed message")
logger.debug("%s", fmt_green(message))
payload = _parse_dict(stream)
return token, message.strip(), payload | Get notify message and payload dict |
376,726 | def ExportNEP2(self, passphrase):
if len(passphrase) < 2:
raise ValueError("Passphrase must have a minimum of 2 characters")
address_hash_tmp = hashlib.sha256(self.GetAddress().encode("utf-8")).digest()
address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest()
address_hash = address_hash_tmp2[:4]
pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
derived = scrypt.hash(pwd_normalized, address_hash,
N=SCRYPT_ITERATIONS,
r=SCRYPT_BLOCKSIZE,
p=SCRYPT_PARALLEL_FACTOR,
buflen=SCRYPT_KEY_LEN_BYTES)
derived1 = derived[:32]
derived2 = derived[32:]
xor_ed = xor_bytes(bytes(self.PrivateKey), derived1)
cipher = AES.new(derived2, AES.MODE_ECB)
encrypted = cipher.encrypt(xor_ed)
assembled = bytearray()
assembled.extend(NEP_HEADER)
assembled.extend(NEP_FLAG)
assembled.extend(address_hash)
assembled.extend(encrypted)
encrypted_key_nep2 = base58.b58encode_check(bytes(assembled))
return encrypted_key_nep2.decode("utf-8") | Export the encrypted private key in NEP-2 format.
Args:
passphrase (str): The password to encrypt the private key with, as unicode string
Returns:
str: The NEP-2 encrypted private key |
376,727 | def sg_float(tensor, opt):
return tf.cast(tensor, tf.sg_floatx, name=opt.name) | r"""Casts a tensor to floatx.
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name : If provided, it replaces current tensor's name
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`. |
376,728 | def submit(args):
gpus = args.gpus.strip().split()
def mthread_submit(nworker, nserver, envs):
procs = {}
for i, gpu in enumerate(gpus):
for j in range(args.num_threads):
procs[i] = Thread(target=exec_cmd, args=(args.command + [%gpu], , i*args.num_threads+j, envs))
procs[i].setDaemon(True)
procs[i].start()
for i in range(len(gpus)*args.num_threads, len(gpus)*args.num_threads + nserver):
procs[i] = Thread(target=exec_cmd, args=(args.command, , i, envs))
procs[i].setDaemon(True)
procs[i].start()
tracker.submit(args.num_threads*len(gpus), args.num_servers, fun_submit=mthread_submit,
pscmd=(.join(args.command))) | Submit function of local jobs. |
376,729 | def get_playlist_songs(self, playlist_id, limit=1000):
url =
csrf =
params = {: playlist_id, : 0, : True,
: limit, : 1000, : csrf}
result = self.post_request(url, params)
songs = result[][]
songs = [Song(song[], song[]) for song in songs]
return songs | Get all of a playlist's songs.
:param playlist_id: playlist id.
:param limit: length of result returned by weapi.
:return: a list of Song objects. |
376,730 | def Approval(self, username, approval_id):
return ClientApprovalRef(
client_id=self.client_id,
username=username,
approval_id=approval_id,
context=self._context) | Returns a reference to an approval. |
376,731 | def unicode_urlencode(query, doseq=True):
pairs = []
for key, value in query.items():
if isinstance(value, list):
value = list(map(to_utf8, value))
else:
value = to_utf8(value)
pairs.append((to_utf8(key), value))
encoded_query = dict(pairs)
xx = urlencode(encoded_query, doseq)
return xx | Custom wrapper around urlencode to support unicode
Python urlencode doesn't handle unicode well so we need to convert to
bytestrings before using it:
http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround |
376,732 | def clean_global_runtime_state(reset_subsystem=False):
if reset_subsystem:
Subsystem.reset()
Goal.clear()
BuildConfigInitializer.reset() | Resets the global runtime state of a pants runtime for cleaner forking.
:param bool reset_subsystem: Whether or not to clean Subsystem global state. |
376,733 | def reasonable_desired_version(self, desired_version, allow_equal=False,
allow_patch_skip=False):
try:
desired_version = desired_version.base_version
except:
pass
(new_major, new_minor, new_patch) = \
map(int, desired_version.split('.'))
tag_versions = self._versions_from_tags()
if not tag_versions:
return ""
max_version = max(self._versions_from_tags()).base_version
(old_major, old_minor, old_patch) = \
map(int, str(max_version).split('.'))
update_str = str(max_version) + " -> " + str(desired_version)
v_desired = vers.Version(desired_version)
v_max = vers.Version(max_version)
if allow_equal and v_desired == v_max:
return ""
if v_desired < v_max:
return ("Bad update: New version doesn't increase on last tag: "
+ update_str + "\n")
bad_update = skipped_version((old_major, old_minor, old_patch),
(new_major, new_minor, new_patch),
allow_patch_skip)
msg = ""
if bad_update:
msg = ("Bad update: Did you skip a version from "
+ update_str + "?\n")
return msg | Determine whether the desired version is a reasonable next version.
Parameters
----------
desired_version: str
the proposed next version name |
376,734 | def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError()
return struct.pack(self.FMT, self.boot_indicator, self.boot_media_type,
self.load_segment, self.system_type, 0,
self.sector_count, self.load_rba,
self.selection_criteria_type,
self.selection_criteria) | A method to generate a string representing this El Torito Entry.
Parameters:
None.
Returns:
String representing this El Torito Entry. |
376,735 | def verify_constraints(constraints):
if not isinstance(constraints, list):
raise ValueError(
"invalid type returned by make_constraints: %r (must be a list)" % constraints
)
for constraint in constraints:
if not isinstance(constraint, Constraint):
raise ValueError(
"invalid constraint type: %r (must be a Constriant)" % constraint
) | Verify values returned from :meth:`make_constraints`.
Used internally during the :meth:`build` process.
:param constraints: value returned from :meth:`make_constraints`
:type constraints: :class:`list`
:raises ValueError: if verification fails |
376,736 | def _fetch_and_parse_messages(self, mailing_list, from_date):
from_date = datetime_to_utc(from_date)
nmsgs, imsgs, tmsgs = (0, 0, 0)
for mbox in mailing_list.mboxes:
tmp_path = None
try:
tmp_path = self._copy_mbox(mbox)
for message in self.parse_mbox(tmp_path):
tmsgs += 1
if not self._validate_message(message):
imsgs += 1
continue
dt = str_to_datetime(message[MBox.DATE_FIELD])
if dt < from_date:
logger.debug("Message %s sent before %s; skipped",
message[], str(from_date))
tmsgs -= 1
continue
message = self._casedict_to_dict(message)
nmsgs += 1
logger.debug("Message %s parsed", message[])
yield message
except (OSError, EOFError) as e:
logger.warning("Ignoring %s mbox due to: %s", mbox.filepath, str(e))
except Exception as e:
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
raise e
finally:
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
logger.info("Done. %s/%s messages fetched; %s ignored",
nmsgs, tmsgs, imsgs) | Fetch and parse the messages from a mailing list |
376,737 | def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
tag_names = {1: }
return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1) | Read Interoperability tags from file and return as dict. |
376,738 | def make_while_loop(test_and_body_instrs, else_body_instrs, context):
top_of_loop = test_and_body_instrs[0]
test, body_instrs = make_while_loop_test_expr(test_and_body_instrs)
body, orelse_body = make_loop_body_and_orelse(
top_of_loop, body_instrs, else_body_instrs, context,
)
return ast.While(test=test, body=body, orelse=orelse_body) | Make an ast.While node.
Parameters
----------
test_and_body_instrs : deque
Queue of instructions forming the loop test expression and body.
else_body_instrs : deque
Queue of instructions forming the else block of the loop.
context : DecompilationContext |
376,739 | def set_aromatic(self):
for atom in self.atoms:
atom.aromatic = 1
for bond in self.bonds:
bond.aromatic = 1
bond.bondorder = 1.5
bond.bondtype = 4
bond.symbol = ":"
bond.fixed = 1
self.aromatic = 1 | set the cycle to be an aromatic ring |
376,740 | def MessageSetItemDecoder(extensions_by_number):
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError()
if pos > end:
raise _DecodeError()
if type_id == -1:
raise _DecodeError()
if message_start == -1:
raise _DecodeError()
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start,message_end) != message_end:
raise _DecodeError()
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
buffer[message_set_item_start:pos]))
return pos
return DecodeItem | Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
} |
376,741 | def _fetchAllChildren(self):
assert self._h5Group is not None, "dataset undefined (file not opened?)"
assert self.canFetchChildren(), "canFetchChildren must be True"
childItems = []
for childName, h5Child in self._h5Group.items():
if isinstance(h5Child, h5py.Group):
childItems.append(H5pyGroupRti(h5Child, nodeName=childName,
fileName=self.fileName))
elif isinstance(h5Child, h5py.Dataset):
if len(h5Child.shape) == 0:
childItems.append(H5pyScalarRti(h5Child, nodeName=childName,
fileName=self.fileName))
else:
childItems.append(H5pyDatasetRti(h5Child, nodeName=childName,
fileName=self.fileName))
elif isinstance(h5Child, h5py.Datatype):
pass
else:
logger.warn("Ignored {}. It has an unexpected HDF-5 type: {}"
.format(childName, type(h5Child)))
return childItems | Fetches all sub groups and variables that this group contains. |
376,742 | def _schema_nodes(self):
name, ext = os.path.splitext(self._ontology_file)
if ext in []:
self._ontology_parser_function = \
lambda s: rdflib.Graph().parse(s, format=)
else:
self._ontology_parser_function = \
lambda s: pyRdfa().graph_from_source(s)
if not self._ontology_parser_function:
raise ValueError(
"No function found to parse ontology. %s" %
self.errorstring_base)
if not self._ontology_file:
raise ValueError(
"No ontology file specified. %s" % self.errorstring_base)
if not self.lexicon:
raise ValueError(
"No lexicon object assigned. %s" % self.errorstring_base)
latest_file = self._read_schema()
try:
self.graph = self._ontology_parser_function(latest_file)
except:
raise IOError("Error parsing ontology at %s" % latest_file)
for subj, pred, obj in self.graph:
self.ontology[subj].append((pred, obj))
yield (subj, pred, obj) | parse self._ontology_file into a graph |
376,743 | def get(self, sid):
return MessageContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
) | Constructs a MessageContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.chat.v2.service.channel.message.MessageContext
:rtype: twilio.rest.chat.v2.service.channel.message.MessageContext |
376,744 | def liste_stations(self, station=None, detail=False):
condition = ""
if station:
station = _format(station)
condition = "WHERE IDENTIFIANT IN ()" % station
select = ""
if detail:
select =
_sql = % (select, condition)
return psql.read_sql(_sql, self.conn) | List the stations.
Parameters:
station : a valid station name (if empty, lists all stations)
detail : if True, show more detail about the station(s). |
376,745 | def _wrap_paginated_response(cls, request, response, controls, data,
head=None):
paging_response = response[]
if head is None:
head = response[]
link = cls._build_url(
request,
head=head,
start=paging_response[],
limit=paging_response[])
paging = {}
limit = controls.get()
start = controls.get("start")
paging["limit"] = limit
paging["start"] = start
if paging_response.get("next") == "":
return cls._wrap_response(
request,
data=data,
metadata={
: head,
: link,
: paging
})
next_id = paging_response[]
paging[] = next_id
def build_pg_url(start=None):
return cls._build_url(request, head=head, limit=limit, start=start)
paging[] = build_pg_url(paging_response[])
return cls._wrap_response(
request,
data=data,
metadata={
: head,
: link,
: paging
}) | Builds the metadata for a paginated response and wraps everything in
a JSON encoded web.Response |
376,746 | def plot_polynomial(
log,
title,
polynomialDict,
orginalDataDictionary=False,
pathToOutputPlotsFolder="~/Desktop",
xRange=False,
xlabel=False,
ylabel=False,
xAxisLimits=False,
yAxisLimits=False,
yAxisInvert=False,
prependNum=False,
legend=False):
import sys
import matplotlib.pyplot as plt
import numpy as np
colors = {
: ,
: ,
: ,
: ,
: ,
: ,
: ,
: ,
:
}
if not xRange:
log.error()
sys.exit(1)
ax = plt.subplot(111)
if len(xRange) == 2:
x = np.arange(xRange[0] * 4, xRange[1] * 4, 1)
x = x / 4.
else:
x = np.arange(xRange[0], xRange[1], xRange[2])
if xAxisLimits:
ax.set_xlim(xAxisLimits[0], xAxisLimits[1])
else:
overShoot = (xRange[1] - xRange[0]) / 10.
ax.set_xlim(xRange[0] - overShoot, xRange[1] + overShoot)
if yAxisLimits:
ax.set_ylim(yAxisLimits[0], yAxisLimits[1])
theseColors = [colors[], colors[
], colors[], colors[]]
count = 0
if orginalDataDictionary:
for name, data in orginalDataDictionary.iteritems():
ax.plot(data[0], data[1], , label=name,
color=theseColors[count])
count += 1
if count == 4:
count = 0
count = 0
for snType, poly in polynomialDict.iteritems():
log.debug( % (x,))
ax.plot(x, poly(x), label= % (snType,), color=theseColors[count])
count += 1
if count == 4:
count = 0
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if legend:
ax.legend(loc=, bbox_to_anchor=(1, 0.5), prop={: 8})
ax.titlesize =
ax.labelsize =
if xlabel:
plt.xlabel(xlabel, fontsize=)
if ylabel:
plt.ylabel(ylabel, fontsize=)
if title:
plt.title(title, fontsize=,
verticalalignment=, linespacing=0.2)
if yAxisInvert:
ax.invert_yaxis()
if prependNum:
title = "%02d_%s" % (prependNum, title)
thisTitle = title.replace(" ", "_")
thisTitle = thisTitle.replace("-", "_")
fileName = pathToOutputPlotsFolder + thisTitle + ".png"
imageLink = % (thisTitle, fileName)
plt.savefig(fileName)
plt.clf()
return imageLink | *Plot a dictionary of numpy lightcurves polynomials*
**Key Arguments:**
- ``log`` -- logger
- ``title`` -- title for the plot
- ``polynomialDict`` -- dictionary of polynomials { label01 : poly01, label02 : poly02 }
- ``orginalDataDictionary`` -- the original data points {name: [x, y]}
- ``pathToOutputPlotsFolder`` -- path to the output folder to save the plot to
- ``xRange`` -- the x-range for the polynomial [xmin, xmax, interval]
- ``xlabel`` -- xlabel
- ``ylabel`` -- ylabel
- ``xAxisLimits`` -- the x-limits for the axes [xmin, xmax]
- ``yAxisLimits`` -- the y-limits for the axes [ymin, ymax]
- ``yAxisInvert`` -- invert the y-axis? Useful for lightcurves
- ``prependNum`` -- prepend this number to the output filename
- ``legend`` -- plot a legend?
**Return:**
- None |
376,747 | def update_progress_bar(
go, optext, start, total_files, files_sofar, total_bytes,
bytes_sofar, stdin_upload=False):
if (go.quiet or not go.progress_bar or
blobxfer.util.is_none_or_empty(go.log_file) or
start is None):
return
diff = (blobxfer.util.datetime_now() - start).total_seconds()
if diff <= 0:
diff = 1e-9
if total_bytes is None or total_bytes == 0 or bytes_sofar > total_bytes:
done = 0
else:
done = float(bytes_sofar) / total_bytes
rate = bytes_sofar / blobxfer.util.MEGABYTE / diff
if optext == :
rtext =
else:
rtext = optext +
if total_files is None:
fprog =
else:
fprog = .format(files_sofar, total_files)
if stdin_upload:
sys.stdout.write(
(
).format(
optext, * int(total_bytes % 30), rate, fprog, rtext)
)
else:
sys.stdout.write(
(
).format(
optext, * int(done * 30), done * 100, rate, fprog, rtext)
)
if files_sofar == total_files:
sys.stdout.write()
sys.stdout.flush() | Update the progress bar
:param blobxfer.models.options.General go: general options
:param str optext: operation prefix text
:param datetime.datetime start: start time
:param int total_files: total number of files
:param int files_sofar: files transferred so far
:param int total_bytes: total number of bytes
:param int bytes_sofar: bytes transferred so far
:param bool stdin_upload: stdin upload |
376,748 | def import_file_object(filename):
try:
handle = open(filename, 'r')
file_obj = handle.read()
dict_obj = json.loads(file_obj)
except IOError as e:
logger.critical(
% (str(e), str(filename))
)
raise e
except ValueError:
logger.info(
%
(inspect.stack()[0][3], str(filename))
)
return file_obj
return dict_obj | Summary:
Imports block filesystem object
Args:
:filename (str): block filesystem object
Returns:
dictionary obj (valid json file), file data object |
376,749 | def downloadMARCXML(doc_id, library, base="nkc"):
downer = Downloader()
data = downer.download(
ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
DOC_ID=doc_id,
LIBRARY=library
)
)
dom = dhtmlparser.parseString(data)
error = dom.find("login")
if error:
error_msg = error[0].find("error")
if error_msg:
raise LibraryNotFoundException(
"Can" + str(doc_id) + "" + library + "')!\nMessage: " +
"\n".join(map(lambda x: x.getContent(), error_msg))
)
error = dom.find("ill-get-doc")
if error:
error_msg = error[0].find("error")
if error_msg:
raise DocumentNotFoundException(
"\n".join(map(lambda x: x.getContent(), error_msg))
)
return data | Download MARC XML document with given `doc_id` from given `library`.
Args:
doc_id (DocumentID): You will get this from :func:`getDocumentIDs`.
library (str): "``NKC01``" in our case, but don't worry,
:func:`getDocumentIDs` adds library specification into
:class:`DocumentID` named tuple.
Returns:
str: MARC XML unicode string.
Raises:
LibraryNotFoundException
DocumentNotFoundException |
376,750 | def get_caller_module():
stack = inspect.stack()
assert len(stack) > 1
caller = stack[2][0]
return caller.f_globals['__name__'] | Returns the name of the caller's module as a string.
>>> get_caller_module()
'__main__' |
376,751 | def paginate(self, request, offset=0, limit=None):
return self.collection.offset(offset).limit(limit), self.collection.count() | Paginate queryset. |
376,752 | def rename(self, **mapping):
params = {k: v for k, v in self.get_param_values() if k != 'rename'}
return self.__class__(rename=mapping,
source=(self._source() if self._source else None),
linked=self.linked, **params) | The rename method allows stream parameters to be allocated to
new names to avoid clashes with other stream parameters of the
same name. Returns a new clone of the stream instance with the
specified name mapping. |
376,753 | def identify_missing(self, df, check_start=True):
data_missing = df.isnull() * 1
col_name = str(data_missing.columns[0])
if check_start & data_missing[col_name][0] == 1:
data_missing[col_name][0] = 0
return data_missing, col_name | Identify missing data.
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data.
check_start : bool
turns 0 to 1 for the first observation, to display the start of the data
as the beginning of the missing data event
Returns
-------
pd.DataFrame(), str
dataframe where 1 indicates missing data and 0 indicates reported data,
returns the column name generated from the MDAL Query |
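The core of the method is just an isnull mask; a standalone sketch of that step, assuming pandas and numpy are available:

import numpy as np
import pandas as pd

df = pd.DataFrame({"power": [1.0, np.nan, np.nan, 4.0]})
data_missing = df.isnull() * 1            # 1 = missing, 0 = reported
print(data_missing["power"].tolist())     # [0, 1, 1, 0]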
376,754 | def clear_text(self):
if (self.get_text_metadata().is_read_only() or
self.get_text_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['text'] = \
dict(self.get_text_metadata().get_default_string_values()[0]) | stub |
376,755 | def add_px_err(isoel, col1, col2, px_um, inplace=False):
Isoelastics.check_col12(col1, col2)
if "deform" in [col1, col2]:
sign = +1
else:
sign = -1
if col1 == "area_um":
area_ax = 0
deci_ax = 1
else:
area_ax = 1
deci_ax = 0
new_isoel = []
for iso in isoel:
iso = np.array(iso, copy=not inplace)
ddeci = feat_emod.corrpix_deform_delta(area_um=iso[:, area_ax],
px_um=px_um)
iso[:, deci_ax] += sign * ddeci
new_isoel.append(iso)
return new_isoel | Undo pixelation correction
Isoelasticity lines are already corrected for pixelation
effects as described in
Mapping of Deformation to Apparent Young's Modulus
in Real-Time Deformability Cytometry
Christoph Herold, arXiv:1704.00572 [cond-mat.soft] (2017)
https://arxiv.org/abs/1704.00572.
If the isoealsticity lines are displayed with deformation data
that are not corrected, then the lines must be "un"-corrected,
i.e. the pixelation error must be added to the lines to match
the experimental data.
Parameters
----------
isoel: list of 2d ndarrays of shape (N, 3)
Each item in the list corresponds to one isoelasticity
line. The first column is defined by `col1`, the second
by `col2`, and the third column is the emodulus.
col1, col2: str
Define the first two columns of each isoelasticity line.
One of ["area_um", "circ", "deform"]
px_um: float
Pixel size [µm] |
376,756 | def preorder_iter(self, filter_fn=None):
stack = [self]
while stack:
node = stack.pop()
if filter_fn is None or filter_fn(node):
yield node
stack.extend([i for i in reversed(node._children)]) | From DendroPy
Preorder traversal of self and its child_nodes. Returns self
and all descendants such that a node is returned before its
child_nodes (and their child_nodes). Filtered by filter_fn: node is
only returned if no filter_fn is given or if filter_fn returns
True. |
376,757 | def get_el_sp(obj):
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj)) | Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummyElement parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie. |
376,758 | def queues(self, page=None, per_page=None, previous=None, prefix=None):
options = {}
if page is not None:
raise Exception()
if per_page is not None:
options['per_page'] = per_page
if previous is not None:
options['previous'] = previous
if prefix is not None:
options['prefix'] = prefix
query = urlencode(options)
url = 'queues'
if query != '':
url = "%s?%s" % (url, query)
result = self.client.get(url)
return [queue[] for queue in result[][]] | Execute an HTTP request to get a list of queues and return it.
Keyword arguments:
page -- The 0-based page to get queues from. Defaults to None, which
omits the parameter. |
376,759 | def foreach_(ctx, seq, expr):
from . import context, parse as uxpathparse
if hasattr(seq, 'compute'):
seq = seq.compute(ctx)
expr = next(string_arg(ctx, expr), )
pexpr = uxpathparse(expr)
for item in seq:
innerctx = ctx.copy(item=item)
yield from pexpr.compute(innerctx) | Yields the result of applying an expression to each item in the input sequence.
* seq: input sequence
* expr: expression to be converted to string, then dynamically evaluated for each item on the sequence to produce the result |
376,760 | def decode_varint_1(buffer, pos=0):
value = 0
shift = 0
memview = memoryview(buffer)
for i in range(pos, pos + 10):
try:
byte = _read_byte(memview, i)
except IndexError:
raise ValueError("End of byte stream")
if byte & 0x80 != 0:
value |= (byte & 0x7f) << shift
shift += 7
else:
value |= byte << shift
break
else:
raise ValueError("Out of double range")
return (value >> 1) ^ -(value & 1), i + 1 | Decode an integer from a varint presentation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
buffer (bytes-like): any object acceptable by ``memoryview``
pos (int): optional position to read from
Returns:
(int, int): Decoded int value and next read position |
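A worked example, assuming decode_varint_1 and its _read_byte helper are importable: the bytes b"\x96\x01" encode the unsigned varint 150, and the final zigzag step maps 150 to 75.

buf = b"\x96\x01"
# 0x96 -> continuation bit set, low 7 bits = 0x16 (22)
# 0x01 -> 1 << 7 = 128; 128 + 22 = 150
# zigzag: (150 >> 1) ^ -(150 & 1) = 75
value, next_pos = decode_varint_1(buf)
print(value, next_pos)   # 75 2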
376,761 | def filter_by(zips=_zips, **kwargs):
return [z for z in zips if all([k in z and z[k] == v for k, v in kwargs.items()])] | Use `kwargs` to select for desired attributes from list of zipcode dicts |
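A usage sketch with a hypothetical _zips list, since the real module-level data is not shown here:

_zips = [
    {"zip_code": "97201", "state": "OR", "city": "Portland"},
    {"zip_code": "97701", "state": "OR", "city": "Bend"},
    {"zip_code": "10001", "state": "NY", "city": "New York"},
]

print(filter_by(_zips, state="OR", city="Bend"))
# -> [{'zip_code': '97701', 'state': 'OR', 'city': 'Bend'}]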
376,762 | def fillDataProducts(self, dps):
item = None
for dp in dps:
if not dp.ignored:
item = self._makeDPItem(self, dp, item)
self._itemComboBox(item, self.ColAction)
self._itemComboBox(item, self.ColRender) | Fills listview with existing data products |
376,763 | def layout(request, ident, stateless=False, cache_id=None, **kwargs):
_, app = DashApp.locate_item(ident, stateless)
view_func = app.locate_endpoint_function()
resp = view_func()
initial_arguments = get_initial_arguments(request, cache_id)
response_data, mimetype = app.augment_initial_layout(resp, initial_arguments)
return HttpResponse(response_data,
content_type=mimetype) | Return the layout of the dash application |
376,764 | def _get_wv(sentence, ignore=False):
global _vectors
vectors = []
for y in sentence:
y_ = any2unicode(y).strip()
if y_ not in _stopwords:
syns = nearby(y_)[0]
c = []
try:
c.append(_vectors.word_vec(y_))
except KeyError as error:
if ignore:
continue
else:
logging.warning("not exist in w2v model: %s" % y_)
random_state = np.random.RandomState(seed=(hash(y_) % (2**32 - 1)))
c.append(random_state.uniform(low=-10.0, high=10.0, size=(100,)))
for n in syns:
if n is None: continue
try:
v = _vectors.word_vec(any2unicode(n))
except KeyError as error:
random_state = np.random.RandomState(seed=(hash(n) % (2 ** 32 - 1)))
v = random_state.uniform(low=-10.0, high=10.0, size=(100,))
c.append(v)
r = np.average(c, axis=0)
vectors.append(r)
return vectors | Get word2vec data for a sentence.
The sentence is a segmented string. |
376,765 | def parse(self, limit=None):
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
rgd_file = .join(
(self.rawdir, self.files[][]))
p = GafParser()
assocs = p.parse(open(rgd_file, "r"))
for i, assoc in enumerate(assocs):
if in assoc.keys():
self.make_association(assoc)
if limit is not None and i > limit:
break
return | Override Source.parse()
Args:
:param limit (int, optional) limit the number of rows processed
Returns:
:return None |
376,766 | def createEditor(self, delegate, parent, option):
return ColorCtiEditor(self, delegate, parent=parent) | Creates a ColorCtiEditor.
For the parameters see the AbstractCti constructor documentation. |
376,767 | def list_images(self):
images = []
for image in (yield from self.query("GET", "images/json", params={"all": 0})):
if image[]:
for tag in image[]:
if tag != "<none>:<none>":
images.append({: tag})
return sorted(images, key=lambda i: i[]) | Gets Docker image list.
:returns: list of dicts
:rtype: list |
376,768 | def _evolve_reader(in_file):
cur_id_list = None
cur_tree = None
with open(in_file) as in_handle:
for line in in_handle:
if line.startswith("id,"):
if cur_id_list:
yield cur_id_list, cur_tree
cur_id_list = []
cur_tree = None
elif cur_tree is not None:
if line.strip() and not line.startswith("Number of non-empty"):
cur_tree.append(line.rstrip())
elif not line.strip() and cur_id_list and len(cur_id_list) > 0:
cur_tree = []
elif line.strip():
parts = []
for part in line.strip().split("\t"):
if part.endswith(","):
part = part[:-1]
parts.append(part)
if len(parts) > 4:
nid, freq, _, _, support = parts
cur_id_list.append((nid, freq, support.split("; ")))
if cur_id_list:
yield cur_id_list, cur_tree | Generate a list of region IDs and trees from a top_k_trees evolve.py file. |
376,769 | async def send_data(self, data, addr):
channel = self.peer_to_channel.get(addr)
if channel is None:
channel = self.channel_number
self.channel_number += 1
self.channel_to_peer[channel] = addr
self.peer_to_channel[addr] = channel
await self.channel_bind(channel, addr)
header = struct.pack('!HH', channel, len(data))
self._send(header + data) | Send data to a remote host via the TURN server. |
376,770 | def match_bitap(self, text, pattern, loc):
start = max(1, 2 * loc - best_loc)
else:
break
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc | Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1. |
376,771 | def format_results(self, results):
if not results:
return None
userdn = results[0][0]
userobj = results[0][1]
userobj[] = userdn
keymap = self.config.get()
if keymap:
return {key:scalar(userobj.get(value)) for key, value in keymap.items() if _is_utf8(scalar(userobj.get(value))) }
else:
return {key:scalar(value) for key, value in userobj.items() if _is_utf8(scalar(value)) } | Format the ldap results object into something that is reasonable |
376,772 | def get_hstwcs(filename,hdulist,extnum):
hdrwcs = wcsutil.HSTWCS(hdulist,ext=extnum)
hdrwcs.filename = filename
hdrwcs.expname = hdulist[extnum].header['EXPNAME']
hdrwcs.extver = hdulist[extnum].header['EXTVER']
return hdrwcs | Return the HSTWCS object for a given chip. |
376,773 | def _get_nets_krnic(self, *args, **kwargs):
from warnings import warn
warn(
)
return self.get_nets_krnic(*args, **kwargs) | Deprecated. This will be removed in a future release. |
376,774 | def filetree(self):
tree = {}
prefix = []
paths = (f.split(os.sep) for f in self.files)
for path in paths:
dirpath = path[:-1]
filename = path[-1]
subtree = tree
for item in dirpath:
if item not in subtree:
subtree[item] = {}
subtree = subtree[item]
subtree[filename] = None
return tree | :attr:`files` as a dictionary tree
Each node is a ``dict`` that maps directory/file names to child nodes.
Each child node is a ``dict`` for directories and ``None`` for files.
If :attr:`path` is ``None``, this is an empty ``dict``. |
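A standalone sketch of the same nesting logic, with a hypothetical file list standing in for self.files and '/'-separated paths instead of os.sep:

files = ["docs/index.rst", "docs/api/core.rst", "README.md"]
tree = {}
for path in (f.split("/") for f in files):
    subtree = tree
    for item in path[:-1]:
        subtree = subtree.setdefault(item, {})   # descend, creating dirs as needed
    subtree[path[-1]] = None                     # files map to None
print(tree)
# {'docs': {'index.rst': None, 'api': {'core.rst': None}}, 'README.md': None}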
376,775 | def jsonnummultby(self, name, path, number):
return self.execute_command('JSON.NUMMULTBY', name, str_path(path), self._encode(number)) | Multiplies the numeric (integer or floating point) JSON value under
``path`` at key ``name`` with the provided ``number`` |
376,776 | def do_read(self, args):
args = args.split()
if _debug: ReadPropertyMultipleConsoleCmd._debug("do_read %r", args)
try:
i = 0
addr = args[i]
i += 1
read_access_spec_list = []
while i < len(args):
obj_id = ObjectIdentifier(args[i]).value
i += 1
prop_reference_list = []
while i < len(args):
prop_id = args[i]
if prop_id not in PropertyIdentifier.enumerations:
break
i += 1
if prop_id in (, , ):
pass
else:
datatype = get_datatype(obj_id[0], prop_id)
if not datatype:
raise ValueError("invalid property for object type")
prop_reference = PropertyReference(
propertyIdentifier=prop_id,
)
if (i < len(args)) and args[i].isdigit():
prop_reference.propertyArrayIndex = int(args[i])
i += 1
prop_reference_list.append(prop_reference)
if not prop_reference_list:
raise ValueError("provide at least one property")
read_access_spec = ReadAccessSpecification(
objectIdentifier=obj_id,
listOfPropertyReferences=prop_reference_list,
)
read_access_spec_list.append(read_access_spec)
if not read_access_spec_list:
raise RuntimeError("at least one read access specification required")
request = ReadPropertyMultipleRequest(
listOfReadAccessSpecs=read_access_spec_list,
)
request.pduDestination = Address(addr)
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - request: %r", request)
iocb = IOCB(request)
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - iocb: %r", iocb)
deferred(this_application.request_io, iocb)
iocb.wait()
if iocb.ioResponse:
apdu = iocb.ioResponse
if not isinstance(apdu, ReadPropertyMultipleACK):
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - not an ack")
return
for result in apdu.listOfReadAccessResults:
objectIdentifier = result.objectIdentifier
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - objectIdentifier: %r", objectIdentifier)
for element in result.listOfResults:
propertyIdentifier = element.propertyIdentifier
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyIdentifier: %r", propertyIdentifier)
propertyArrayIndex = element.propertyArrayIndex
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyArrayIndex: %r", propertyArrayIndex)
readResult = element.readResult
sys.stdout.write(propertyIdentifier)
if propertyArrayIndex is not None:
sys.stdout.write("[" + str(propertyArrayIndex) + "]")
if readResult.propertyAccessError is not None:
sys.stdout.write(" ! " + str(readResult.propertyAccessError) + )
else:
propertyValue = readResult.propertyValue
datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise TypeError("unknown datatype")
if issubclass(datatype, Array) and (propertyArrayIndex is not None):
if propertyArrayIndex == 0:
value = propertyValue.cast_out(Unsigned)
else:
value = propertyValue.cast_out(datatype.subtype)
else:
value = propertyValue.cast_out(datatype)
if _debug: ReadPropertyMultipleConsoleCmd._debug(" - value: %r", value)
sys.stdout.write(" = " + str(value) + )
sys.stdout.flush()
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + )
except Exception, error:
ReadPropertyMultipleConsoleCmd._exception("exception: %r", error) | read <addr> ( <objid> ( <prop> [ <indx> ] )... )... |
376,777 | def array2tree(arr, name=, tree=None):
import ROOT
if tree is not None:
if not isinstance(tree, ROOT.TTree):
raise TypeError("tree must be a ROOT.TTree")
incobj = ROOT.AsCObject(tree)
else:
incobj = None
cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj)
return ROOT.BindObject(cobj, ) | Convert a numpy structured array into a ROOT TTree.
Fields of basic types, strings, and fixed-size subarrays of basic types are
supported. ``np.object`` and ``np.float16`` are currently not supported.
Parameters
----------
arr : array
A numpy structured array
name : str (optional, default='tree')
Name of the created ROOT TTree if ``tree`` is None.
tree : ROOT TTree (optional, default=None)
An existing ROOT TTree to be extended by the numpy array. Any branch
with the same name as a field in the numpy array will be extended as
long as the types are compatible, otherwise a TypeError is raised. New
branches will be created and filled for all new fields.
Returns
-------
root_tree : a ROOT TTree
Notes
-----
When using the ``tree`` argument to extend and/or add new branches to an
existing tree, note that it is possible to create branches of different
lengths. This will result in a warning from ROOT when root_numpy calls the
tree's ``SetEntries()`` method. Beyond that, the tree should still be
usable. While it might not be generally recommended to create branches with
differing lengths, this behaviour could be required in certain situations.
root_numpy makes no attempt to prevent such behaviour as this would be more
strict than ROOT itself. Also see the note about converting trees that have
branches of different lengths into numpy arrays in the documentation of
:func:`tree2array`.
See Also
--------
array2root
root2array
tree2array
Examples
--------
Convert a numpy array into a tree:
>>> from root_numpy import array2tree
>>> import numpy as np
>>>
>>> a = np.array([(1, 2.5, 3.4),
... (4, 5, 6.8)],
... dtype=[('a', np.int32),
... ('b', np.float32),
... ('c', np.float64)])
>>> tree = array2tree(a)
>>> tree.Scan()
************************************************
* Row * a * b * c *
************************************************
* 0 * 1 * 2.5 * 3.4 *
* 1 * 4 * 5 * 6.8 *
************************************************
Add new branches to an existing tree (continuing from the example above):
>>> b = np.array([(4, 10),
... (3, 5)],
... dtype=[('d', np.int32),
... ('e', np.int32)])
>>> array2tree(b, tree=tree)
<ROOT.TTree object ("tree") at 0x1449970>
>>> tree.Scan()
************************************************************************
* Row * a * b * c * d * e *
************************************************************************
* 0 * 1 * 2.5 * 3.4 * 4 * 10 *
* 1 * 4 * 5 * 6.8 * 3 * 5 *
************************************************************************ |
376,778 | def prov(self):
if not self._prov:
self._prov = self._api.get_bundle(self._document.id, self._id)
return self._prov | :return: This bundle's provenance
:rtype: :py:class:`prov.model.ProvDocument` |
376,779 | def update_rtfilters(self):
new_peer_to_rtfilter_map = self._compute_rtfilter_map()
for peer in self._peer_manager.iterpeers:
pre_rt_filter = self._rt_mgr.peer_to_rtfilter_map.get(peer, set())
curr_rt_filter = new_peer_to_rtfilter_map.get(peer, set())
old_rts = pre_rt_filter - curr_rt_filter
new_rts = curr_rt_filter - pre_rt_filter
if new_rts or old_rts:
LOG.debug(
,
peer.ip_address, new_rts, old_rts)
self._on_update_rt_filter(peer, new_rts, old_rts)
self._peer_manager.set_peer_to_rtfilter_map(new_peer_to_rtfilter_map)
self._rt_mgr.peer_to_rtfilter_map = new_peer_to_rtfilter_map
LOG.debug(, self._rt_mgr.peer_to_rtfilter_map)
self._rt_mgr.update_interested_rts() | Updates RT filters for each peer.
Should be called if a new RT Nlri's have changed based on the setting.
Currently only used by `Processor` to update the RT filters after it
has processed a RT destination. If RT filter has changed for a peer we
call RT filter change handler. |
376,780 | def get_token_by_code(self, code):
url =
data = {: self.client_id,
: self.client_secret,
: ,
: code,
: self.redirect_uri}
r = requests.post(url, data=data)
check_error(r)
return r.json() | return origin json |
376,781 | def fetch_weeks(self, weeks, overwrite=False):
esf = ElasticsearchFetcher(self.store, self.config)
for year, week in weeks:
print("Fetch {}-{}".format(year, week))
esf.fetch(year, week, overwrite) | Fetch and cache the requested weeks. |
376,782 | def delete_router(self, router):
router_id = self._find_router_id(router)
ret = self.network_conn.delete_router(router=router_id)
return ret if ret else True | Delete the specified router |
376,783 | def case_insensitive(self):
if "--case-ins" in self.flag:
data_dict = Utils().case_sensitive(self.data)
for key, value in data_dict.iteritems():
if key == self.name.lower():
self.name = value | Matching packages distinguish between uppercase and
lowercase |
376,784 | def execute_system_command(arg, **_):
usage = "Syntax: system [command].\n"
if not arg:
return [(None, None, None, usage)]
try:
command = arg.strip()
if command.startswith('cd'):
ok, error_message = handle_cd_command(arg)
if not ok:
return [(None, None, None, error_message)]
return [(None, None, None, '')]
args = arg.split()
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
response = output if not error else error
if isinstance(response, bytes):
encoding = locale.getpreferredencoding(False)
response = response.decode(encoding)
return [(None, None, None, response)]
except OSError as e:
return [(None, None, None, % e.strerror)] | Execute a system shell command. |
376,785 | def print_pack(document_loader,
processobj,
uri,
metadata
):
packed = pack(document_loader, processobj, uri, metadata)
if len(packed["$graph"]) > 1:
return json_dumps(packed, indent=4)
return json_dumps(packed["$graph"][0], indent=4) | Return a CWL serialization of the CWL document in JSON. |
376,786 | def get_limit_action(self, criticity, stat_name=""):
ret = [(stat_name + '_' + criticity + '_action', False),
(stat_name + '_' + criticity + '_action_repeat', True),
(self.plugin_name + '_' + criticity + '_action', False),
(self.plugin_name + '_' + criticity + '_action_repeat', True)]
for r in ret:
if r[0] in self._limits:
return self._limits[r[0]], r[1]
raise KeyError | Return the tuple (action, repeat) for the alert.
- action is a command line
- repeat is a bool |
376,787 | def tryLoadingFrom(tryPath,moduleName=):
if not in swhlab.__file__:
print("loaded custom swhlab module from",
os.path.dirname(swhlab.__file__))
return
while len(tryPath)>5:
sp=tryPath+"/swhlab/"
if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"):
if not os.path.dirname(tryPath) in sys.path:
sys.path.insert(0,os.path.dirname(tryPath))
print("
print("
print("
tryPath=os.path.dirname(tryPath)
return | if the module is in this path, load it from the local folder. |
376,788 | def linearRegression(requestContext, seriesList, startSourceAt=None,
endSourceAt=None):
from .app import evaluateTarget
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None:
sourceContext['startTime'] = parseATTime(startSourceAt)
if endSourceAt is not None:
sourceContext['endTime'] = parseATTime(endSourceAt)
sourceList = []
for series in seriesList:
source = evaluateTarget(sourceContext, series.pathExpression)
sourceList.extend(source)
for source, series in zip(sourceList, seriesList):
newName = 'linearRegression(%s, %s, %s)' % (  # format string reconstructed; original literal lost
series.name,
int(epoch(sourceContext['startTime'])),
int(epoch(sourceContext['endTime'])))
forecast = linearRegressionAnalysis(source)
if forecast is None:
continue
factor, offset = forecast
values = [offset + (series.start + i * series.step) * factor
for i in range(len(series))]
newSeries = TimeSeries(newName, series.start, series.end,
series.step, values)
newSeries.pathExpression = newSeries.name
results.append(newSeries)
return results | Graphs the liner regression function by least squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string
with the time to start the line and another quoted string with the time
to end the line. The start and end times are inclusive (default range is
from ``from`` to ``until``). See ``from / until`` in the render\_api_ for examples of
time formats. Datapoints in the range are used for the regression.
Example::
&target=linearRegression(Server.instance01.threads.busy,'-1d')
&target=linearRegression(Server.instance*.threads.busy,
"00:00 20140101","11:59 20140630") |
376,789 | def save_image(self, img, filename=None, **kwargs):
filename = filename or self.get_filename(**img.data.attrs)
nt.save(img, filename, **kwargs) | Save the image to the given *filename* in ninjotiff_ format.
.. _ninjotiff: http://www.ssec.wisc.edu/~davidh/polar2grid/misc/NinJo_Satellite_Import_Formats.html |
376,790 | def create_token_for_user(user: get_user_model()) -> bytes:
token = urandom(48)
AuthToken.objects.create(
hashed_token=AuthToken._hash_token(token),
user=user)
return token | Create a new random auth token for user. |
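Because only the hash is stored, verification must hash the presented token and look it up. A hedged sketch of that matching step; the Django-ORM lookup is an assumption based on the model usage above, not code from the source:

def get_user_for_token(token: bytes):
    # Hash the presented token the same way it was hashed at creation time.
    hashed = AuthToken._hash_token(token)
    try:
        return AuthToken.objects.get(hashed_token=hashed).user
    except AuthToken.DoesNotExist:
        return None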
376,791 | def _find_by_sha1(self, sha1):
for image_part in self:
if not hasattr(image_part, 'sha1'):
continue
if image_part.sha1 == sha1:
return image_part
return None | Return an |ImagePart| object belonging to this package or |None| if
no matching image part is found. The image part is identified by the
SHA1 hash digest of the image binary it contains. |
376,792 | def dijkstra(G, start, weight='weight'):  # default edge-attribute key assumed; original literal lost
if start not in G.vertices:
raise GraphInsertError("Vertex %s doesn't exist." % (start,))
visited = {start: 0}
path = {}
vertices = set(G.vertices.keys())
while vertices:
min_vertex = None
for vertex in vertices:
if vertex in visited:
if min_vertex is None or visited[vertex] < visited[min_vertex]:
min_vertex = vertex
if min_vertex is None:
break
vertices.remove(min_vertex)
current_weight = visited[min_vertex]
for edge in G.vertices[min_vertex]:
edge_weight = current_weight + G.edges[(min_vertex, edge)][weight]
if edge not in visited or edge_weight < visited[edge]:
visited[edge] = edge_weight
path[edge] = min_vertex
return visited, path | Compute shortest path lengths between start
and all other reachable nodes of a weighted graph.
return -> ({vertex: weight from start, }, {vertex: predecessor, }) |
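A small standalone illustration of the (visited, path) return format; TinyGraph is a stand-in that exposes only the attributes the function relies on (vertices and edges), not the library's real Graph class:

class TinyGraph:
    def __init__(self):
        # adjacency: vertex -> iterable of neighbours
        self.vertices = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
        # edge attributes keyed by (tail, head)
        self.edges = {('a', 'b'): {'weight': 1},
                      ('a', 'c'): {'weight': 4},
                      ('b', 'c'): {'weight': 2}}

visited, path = dijkstra(TinyGraph(), 'a', weight='weight')
print(visited)  # {'a': 0, 'b': 1, 'c': 3}
print(path)     # {'b': 'a', 'c': 'b'}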
376,793 | def plot_report(report, success_name, fail_names, label=None,
is_max_confidence=True,
linewidth=LINEWIDTH,
plot_upper_bound=True):
(fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
success_bounded) = make_curve(report, success_name, fail_names)
assert len(fail_lower_bound) == len(fail_upper_bound)
fail_optimal = np.array(fail_optimal)
fail_lower_bound = np.array(fail_lower_bound)
fail_upper_bound = np.array(fail_upper_bound)
if is_max_confidence:
p, = pyplot.plot(fail_optimal, success_optimal, label=label,
linewidth=linewidth)
color = p.get_color()
pyplot.plot(fail_lower_bound, success_bounded, '--', color=color)  # linestyle assumed; original literal lost
if plot_upper_bound:
pyplot.plot(fail_upper_bound, success_bounded, ':', color=color)  # linestyle assumed; original literal lost
else:
all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0)
pyplot.plot(all_fail, success_optimal + success_bounded,
label=label, linewidth=linewidth)  # positional style literal lost in extraction; omitted so matplotlib uses its default
pyplot.xlabel("Failure rate on adversarial examples")
pyplot.ylabel("Success rate on clean examples")
gap = fail_upper_bound - fail_lower_bound
if gap.size > 0:
assert gap.min() >= 0.
print("Max gap: ", gap.max()) | Plot a success fail curve from a confidence report
:param report: A confidence report
(the type of object saved by make_confidence_report.py)
:param success_name: see plot_report_from_path
:param fail_names: see plot_report_from_path
:param label: see plot_report_from_path
:param is_max_confidence: see plot_report_from_path
:param linewidth: see plot_report_from_path |
376,794 | def longestorf(args):
p = OptionParser(longestorf.__doc__)
p.add_option("--ids", action="store_true",
help="Generate table with ORF info [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
pf = fastafile.rsplit(".", 1)[0]
orffile = pf + ".orf.fasta"
idsfile = None
if opts.ids:
idsfile = pf + ".orf.ids"
fwids = open(idsfile, "w")
f = Fasta(fastafile, lazy=True)
fw = must_open(orffile, "w")
before, after = 0, 0
for name, rec in f.iteritems_ordered():
cds = rec.seq
before += len(cds)
orf = ORFFinder(cds)
lorf = orf.get_longest_orf()
newcds = Seq(lorf)
after += len(newcds)
newrec = SeqRecord(newcds, id=name, description=rec.description)
SeqIO.write([newrec], fw, "fasta")
if idsfile:
print("\t".join((name, orf.info)), file=fwids)
fw.close()
if idsfile:
fwids.close()
logging.debug("Longest ORFs written to `{0}` ({1}).".\
format(orffile, percentage(after, before)))
return orffile | %prog longestorf fastafile
Find longest ORF for each sequence in fastafile. |
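The ORFFinder class used above is not shown in this entry. A minimal, self-contained sketch of forward-strand longest-ORF extraction for a single sequence (it ignores the reverse strand and the partial-ORF edge cases the real class may handle):

def longest_orf(seq):
    # Scan the three forward reading frames for the longest ATG..stop ORF.
    stops = ("TAA", "TAG", "TGA")
    seq = seq.upper()
    best = ""
    for frame in range(3):
        i = frame
        while i + 3 <= len(seq):
            if seq[i:i + 3] == "ATG":
                j = i + 3
                while j + 3 <= len(seq) and seq[j:j + 3] not in stops:
                    j += 3
                # include the stop codon if one was found before the sequence end
                candidate = seq[i:j + 3] if j + 3 <= len(seq) else seq[i:j]
                if len(candidate) > len(best):
                    best = candidate
                i = j  # resume scanning after this ORF
            i += 3
    return best

print(longest_orf("CCATGAAATTTGGGTAACCATGCCC"))  # -> ATGAAATTTGGGTAA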
376,795 | def get_bins(self):
if self.retrieved:
raise errors.IllegalState()
self.retrieved = True
return objects.BinList(self._results, runtime=self._runtime) | Gets the bin list resulting from the search.
return: (osid.resource.BinList) - the bin list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.* |
376,796 | def resolve(input, representation, resolvers=None, get3d=False, **kwargs):
results = query(input, representation, resolvers, False, get3d, **kwargs)
result = results[0].value if results else None
return result | Resolve input to the specified output representation.
:param string input: Chemical identifier to resolve
:param string representation: Desired output representation
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:returns: Output representation or None
:rtype: string or None
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable |
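If this is the CIRpy resolve() function (the signature matches), usage looks roughly like the following; the import path and representation names are common CIR conventions, and network access to the CIR service is assumed:

import cirpy  # assumed import path for the module shown above

smiles = cirpy.resolve('Aspirin', 'smiles')              # name -> SMILES
iupac_name = cirpy.resolve('C1=CC=CC=C1', 'iupac_name')  # SMILES -> IUPAC name
formula = cirpy.resolve('Aspirin', 'formula')            # name -> molecular formula
print(smiles, iupac_name, formula)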
376,797 | def _cast(cls, base_info, take_ownership=True):
type_value = base_info.type.value
try:
new_obj = cast(base_info, cls.__types[type_value])
except KeyError:
new_obj = base_info
if take_ownership:
assert not base_info.__owns
new_obj._take_ownership()
return new_obj | Casts a GIBaseInfo instance to the right sub type.
The original GIBaseInfo can't have ownership.
Will take ownership. |
376,798 | def request_handler(self, can_handle_func):
def wrapper(handle_func):
if not callable(can_handle_func) or not callable(handle_func):
raise SkillBuilderException(
"Request Handler can_handle_func and handle_func "
"input parameters should be callable")
class_attributes = {
"can_handle": lambda self, handler_input: can_handle_func(
handler_input),
"handle": lambda self, handler_input: handle_func(
handler_input)
}
request_handler_class = type(
"RequestHandler{}".format(
handle_func.__name__.title().replace("_", "")),
(AbstractRequestHandler,), class_attributes)
self.add_request_handler(request_handler=request_handler_class())
return wrapper | Decorator that can be used to add request handlers easily to
the builder.
The can_handle_func has to be a Callable instance, which takes
a single parameter and no varargs or kwargs. This is because
of the RequestHandler class signature restrictions. The
returned wrapper function can be applied as a decorator on any
function that returns a response object by the skill. The
function should follow the signature of the handle function in
:py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractRequestHandler`
class.
:param can_handle_func: The function that validates if the
request can be handled.
:type can_handle_func: Callable[[Input], bool]
:return: Wrapper function that can be decorated on a handle
function. |
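A hedged usage sketch of the decorator; it assumes the standard ASK SDK for Python surface (SkillBuilder and is_intent_name), which may differ from the exact package this builder lives in:

from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_intent_name

sb = SkillBuilder()

@sb.request_handler(can_handle_func=is_intent_name("HelloWorldIntent"))
def hello_world_handler(handler_input):
    # handle_func: build and return the response for the matched request
    return handler_input.response_builder.speak("Hello from the decorator!").response

lambda_handler = sb.lambda_handler()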
376,799 | def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
rules = list(self.lifecycle_rules)
rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
self.lifecycle_rules = rules | Add a "set storage class" rule to the lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_set_storage_class_rule]
:end-before: [END add_lifecycle_set_storage_class_rule]
:type storage_class: str, one of :attr:`_STORAGE_CLASSES`.
:param storage_class: new storage class to assign to matching items.
:type kw: dict
:param kw: arguments passed to :class:`LifecycleRuleConditions`. |
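A hedged usage sketch with the google-cloud-storage client; the bucket name is a placeholder and age=365 is an illustrative LifecycleRuleConditions argument:

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket('my-example-bucket')
# Move objects older than a year to COLDLINE storage.
bucket.add_lifecycle_set_storage_class_rule('COLDLINE', age=365)
bucket.patch()  # persist the updated lifecycle configuration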