Unnamed: 0 | code | docstring
---|---|---|
8,700 | def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'):
xyz_x = color.xyz_x
xyz_y = color.xyz_y
xyz_z = color.xyz_z
orig_illum = color.illuminant
targ_illum = targ_illum.lower()
observer = color.observer
adaptation = adaptation.lower()
color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(
xyz_x, xyz_y, xyz_z, orig_illum, targ_illum,
observer=observer, adaptation=adaptation)
color.set_illuminant(targ_illum)
return color | Convenience function to apply an adaptation directly to a Color object. |
8,701 | def generate_apsara_log_config(json_value):
input_detail = json_value[]
output_detail = json_value[]
config_name = json_value[]
logSample = json_value.get(, )
logstore_name = output_detail[]
endpoint = output_detail.get(, )
log_path = input_detail[]
file_pattern = input_detail[]
log_begin_regex = input_detail.get(, )
topic_format = input_detail[]
filter_keys = input_detail[]
filter_keys_reg = input_detail[]
config = ApsaraLogConfigDetail(config_name, logstore_name, endpoint, log_path, file_pattern,
log_begin_regex, topic_format, filter_keys, filter_keys_reg, logSample)
return config | Generate apsara logtail config from loaded json value
:param json_value:
:return: |
8,702 | def xmlrpc_task_done(self, result):
(task_id, task_results) = result
del self.scheduled_tasks[task_id]
self.task_store.update_results(task_id, task_results)
self.results += 1
return True | Take the results of a computation and put it into the results list. |
8,703 | def _set_digraph_b(self, char):
self.has_digraph_b = True
self.active_vowel_ro = di_b_lt[char][0]
self.active_dgr_b_info = di_b_lt[char] | Sets the second part of a digraph. |
8,704 | def aspirate(self,
volume: float = None,
location: Union[types.Location, Well] = None,
rate: float = 1.0) -> 'InstrumentContext':
self._log.debug("aspirate {} from {} at {}"
.format(volume,
location if location else ,
rate))
if isinstance(location, Well):
point, well = location.bottom()
loc = types.Location(
point + types.Point(0, 0, self.well_bottom_clearance),
well)
self.move_to(loc)
elif isinstance(location, types.Location):
loc = location
self.move_to(location)
elif location is not None:
raise TypeError(
.format(location))
elif self._ctx.location_cache:
loc = self._ctx.location_cache
else:
raise RuntimeError(
"If aspirate is called without an explicit location, another"
" method that moves to a location (such as move_to or "
"dispense) must previously have been called so the robot "
"knows where it is.")
cmds.do_publish(self.broker, cmds.aspirate, self.aspirate,
'before', None, None, self, volume, loc, rate)
self._hw_manager.hardware.aspirate(self._mount, volume, rate)
cmds.do_publish(self.broker, cmds.aspirate, self.aspirate,
'after', self, None, self, volume, loc, rate)
return self | Aspirate a volume of liquid (in microliters/uL) using this pipette
from the specified location
If only a volume is passed, the pipette will aspirate
from its current position. If only a location is passed,
:py:meth:`aspirate` will default to its :py:attr:`max_volume`.
:param volume: The volume to aspirate, in microliters. If not
specified, :py:attr:`max_volume`.
:type volume: int or float
:param location: Where to aspirate from. If `location` is a
:py:class:`.Well`, the robot will aspirate from
:py:attr:`well_bottom_clearance` mm
above the bottom of the well. If `location` is a
:py:class:`.Location` (i.e. the result of
:py:meth:`.Well.top` or :py:meth:`.Well.bottom`), the
robot will aspirate from the exact specified location.
If unspecified, the robot will aspirate from the
current position.
:param rate: The relative plunger speed for this aspirate. During
this aspirate, the speed of the plunger will be
`rate` * :py:attr:`aspirate_speed`. If not specified,
defaults to 1.0 (speed will not be modified).
:type rate: float
:returns: This instance. |
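A sketch of how the location forms described in the docstring might be exercised in an Opentrons-style protocol; the labware and pipette load names are placeholders, not a prescription.

```python
# Placeholder labware/pipette names; shows the Well, Location and implicit
# current-position forms of aspirate() described above.
def run(protocol):
    plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '1')
    tips = protocol.load_labware('opentrons_96_tiprack_300ul', '2')
    pipette = protocol.load_instrument('p300_single', 'right', tip_racks=[tips])

    pipette.pick_up_tip()
    pipette.aspirate(100, plate['A1'])        # Well: clearance above the well bottom
    pipette.dispense(100, plate['A2'].top())  # Location: exact point from Well.top()
    pipette.aspirate(50)                      # no location: uses the cached position
    pipette.drop_tip()
```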
8,705 | def update(self, y=None, inplace=False, **kwargs):
kwargs.update({'k': kwargs.pop('k', self.k)})
kwargs.update({'pct': kwargs.pop('pct', self.pct)})
kwargs.update({'truncated': kwargs.pop('truncated', self._truncated)})
if inplace:
self._update(y, **kwargs)
else:
new = copy.deepcopy(self)
new._update(y, **kwargs)
return new | Add data or change classification parameters.
Parameters
----------
y : array
(n,1) array of data to classify
inplace : bool
whether to conduct the update in place or to return a
copy estimated from the additional specifications.
Additional parameters provided in **kwargs are passed to the init
function of the class. For documentation, check the class constructor. |
8,706 | def toto(arch_name, comment=, clear=False, read_comment=False, list_members=False, time_show=False):
if comment and clear:
clingon.RunnerError("You cannot specify --comment and --clear together")
z = None
if not os.path.isfile(arch_name):
print "Creating archive", arch_name
z = zipfile.ZipFile(arch_name, 'w')
for f in [x for x in glob.iglob() if not x.endswith()][:3]:
print " Add file %s to %s" % (f, arch_name)
z.write(f)
if comment:
mtime = os.path.getmtime(arch_name)
if not z:
z = zipfile.ZipFile(arch_name, 'a')
z.comment = comment
if z:
z.close()
if comment:
os.utime(arch_name, (time.time(), mtime))
if read_comment:
z = zipfile.ZipFile(arch_name, 'r')
print "Comment:", z.comment, len(z.comment)
if list_members:
z = zipfile.ZipFile(arch_name, 'r')
print "Members:", z.namelist()
if time_show:
print "Access time:", time.ctime(os.path.getatime(arch_name))
print "Modif time:", time.ctime(os.path.getmtime(arch_name)) | Small utility for changing comment in a zip file
without changing the file modification datetime. |
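The core trick in `toto` is stashing the archive's mtime before writing the comment and restoring it with `os.utime` afterwards. A minimal standard-library sketch of just that step (Python 3, where the comment must be bytes):

```python
import os
import time
import zipfile

def set_zip_comment(path, comment):
    """Set a zip archive's comment without touching its modification time."""
    mtime = os.path.getmtime(path)           # remember the original mtime
    with zipfile.ZipFile(path, 'a') as z:    # append mode keeps existing members
        z.comment = comment.encode('utf-8')  # ZipFile.comment is bytes on Python 3
    os.utime(path, (time.time(), mtime))     # atime = now, mtime = original
```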
8,707 | def perform_command(self):
if self.has_option([u"--list-parameters"]):
return self.print_parameters()
if len(self.actual_arguments) < 2:
return self.print_help()
container_path = self.actual_arguments[0]
output_directory_path = self.actual_arguments[1]
config_string = None
if (len(self.actual_arguments)) > 2 and (not self.actual_arguments[2].startswith(u"-")):
config_string = self.actual_arguments[2]
validate = not self.has_option(u"--skip-validator")
if self.has_option(u"--cewsubprocess"):
self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED] = True
if not self.check_input_file_or_directory(container_path):
return self.ERROR_EXIT_CODE
if not self.check_output_directory(output_directory_path):
return self.ERROR_EXIT_CODE
if validate:
try:
self.print_info(u"Validating the container (specify --skip-validator to bypass)...")
validator = Validator(rconf=self.rconf, logger=self.logger)
result = validator.check_container(container_path, config_string=config_string)
if not result.passed:
self.print_error(u"The given container is not valid:")
self.print_error(result.pretty_print())
return self.ERROR_EXIT_CODE
self.print_info(u"Validating the container... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while validating the container:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Loading job from container...")
executor = ExecuteJob(rconf=self.rconf, logger=self.logger)
executor.load_job_from_container(container_path, config_string)
self.print_info(u"Loading job from container... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while loading the job:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Executing...")
executor.execute()
self.print_info(u"Executing... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while executing the job:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Creating output container...")
path = executor.write_output_container(output_directory_path)
self.print_info(u"Creating output container... done")
self.print_success(u"Created output file " % path)
executor.clean(True)
return self.NO_ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the output container:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE | Perform command and return the appropriate exit code.
:rtype: int |
8,708 | def add_node(self,node):
if not isinstance(node, CondorDAGNode):
raise CondorDAGError, "Nodes must be class CondorDAGNode or subclass"
if not isinstance(node.job(), CondorDAGManJob):
node.set_log_file(self.__log_file_path)
self.__nodes.append(node)
if self.__integer_node_names:
node.set_name(str(self.__node_count))
self.__node_count += 1
if node.job() not in self.__jobs:
self.__jobs.append(node.job()) | Add a CondorDAGNode to this DAG. The CondorJob that the node uses is
also added to the list of Condor jobs in the DAG so that a list of the
submit files needed by the DAG can be maintained. Each unique CondorJob
will be added once to prevent duplicate submit files being written.
@param node: CondorDAGNode to add to the CondorDAG. |
8,709 | def _api_item_history(self, plugin, item, nb=0):
return self._api_itemvalue(plugin, item, history=True, nb=int(nb)) | Glances API RESTful implementation.
Return the JSON representation of the couple plugin/history of item
HTTP/200 if OK
HTTP/400 if plugin is not found
HTTP/404 if others error |
8,710 | def withdraw(self, amount):
pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
try:
results = pg.find(text = "Account Type:").parent.parent.parent.find_all("td", align="center")
self.balance = results[1].text.replace(" NP", "")
except Exception:
logging.getLogger("neolib.user").exception("Could not parse userpgtypewithdrawamounts great to see you again" in pg.content:
self.__loadDetails(pg)
return True
else:
logging.getLogger("neolib.user").info("Failed to withdraw NPs for unknown reason. User NPs: " + str(self.usr.nps) + ". Amount: " + str(amount), {: pg})
return False | Withdraws specified neopoints from the user's account, returns result
Parameters:
amount (int) -- Amount of neopoints to withdraw
Returns
bool - True if successful, False otherwise
Raises
notEnoughBalance |
8,711 | def query(self, sql, parameters=None):
try:
self._cursor.execute(sql, parameters)
except psycopg2.Error as err:
self._incr_exceptions()
raise err
finally:
self._incr_executions()
return results.Results(self._cursor) | A generator to issue a query on the server, mogrifying the
parameters against the sql statement. Results are returned as a
:py:class:`queries.Results` object which can act as an iterator and
has multiple ways to access the result data.
:param str sql: The SQL statement
:param dict parameters: A dictionary of query parameters
:rtype: queries.Results
:raises: queries.DataError
:raises: queries.DatabaseError
:raises: queries.IntegrityError
:raises: queries.InternalError
:raises: queries.InterfaceError
:raises: queries.NotSupportedError
:raises: queries.OperationalError
:raises: queries.ProgrammingError |
8,712 | def push(self, value: Union[int, bytes]) -> None:
if len(self.values) > 1023:
raise FullStack()
validate_stack_item(value)
self.values.append(value) | Push an item onto the stack. |
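A self-contained restatement of the bounded-push pattern above, with a stand-in validator; the 1023-index cap mirrors the snippet, and `FullStack`/`validate_stack_item` here are simplified assumptions rather than the project's own definitions.

```python
from typing import List, Union

class FullStack(Exception):
    """Raised when the 1024-item capacity would be exceeded."""

def validate_stack_item(value: Union[int, bytes]) -> None:
    # Stand-in validator: only type-checks the item.
    if not isinstance(value, (int, bytes)):
        raise TypeError("stack items must be int or bytes, got %r" % type(value))

class Stack:
    def __init__(self) -> None:
        self.values: List[Union[int, bytes]] = []

    def push(self, value: Union[int, bytes]) -> None:
        if len(self.values) > 1023:   # reject a push once 1024 items are stored
            raise FullStack()
        validate_stack_item(value)
        self.values.append(value)

s = Stack()
s.push(42)
s.push(b'\x01')
print(len(s.values))  # 2
```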
8,713 | def _remove_from_world(self):
self.on_remove_from_world()
self._extensions = {}
self._disable_forum_observation()
self._world = None
self._id = None | Clear all the internal data the token needed while it was part of
the world.
Note that this method doesn't actually remove the token from the
world. That's what World._remove_token() does. This method is just
responsible for setting the internal state of the token being removed. |
8,714 | def _setAttributes(self, attributes):
atd = self.attribute_typecode
atd_list = formatted_attribute_list = []
if not attributes:
return formatted_attribute_list
atd_list.append()
idx = 0
while(idx < len(attributes)):
a = attributes[idx]
idx += 1
if a.isWildCard() and a.isDeclaration():
atd_list.append(\
\
% (atd, SCHEMA.XSD3)
)
elif a.isDeclaration():
tdef = a.getTypeDefinition()
if tdef is not None:
tc = %(NAD.getAlias(tdef.getTargetNamespace()),
self.mangle(type_class_name(tdef.getAttributeName()))
)
else:
t = a.getAttribute()
try:
tc = BTI.get_typeclass(t[1], t[0])
except:
tc = ZSI.TC.String
if tc is not None:
tc = %tc
key = None
if a.getAttribute() == :
key = % ( a.getTargetNamespace(),
a.getAttribute() )
elif a.getAttribute() == :
key = % a.getAttribute()
else:
raise ContainerError, \
\
% a.getAttribute()
atd_list.append(\
% (atd, key, tc)
)
elif a.isReference() and a.isAttributeGroup():
return formatted_attribute_list | parameters
attributes -- a flat list of all attributes,
from this list all items in attribute_typecode_dict will
be generated into attrComponents.
returns a list of strings representing the attribute_typecode_dict. |
8,715 | def dst_to_src(self, dst_file):
for map in self.mappings:
src_uri = map.dst_to_src(dst_file)
if (src_uri is not None):
return(src_uri)
raise MapperError(
"Unable to translate destination path (%s) "
"into a source URI." % (dst_file)) | Map destination path to source URI. |
8,716 | def get_img_tag(self, title=, alt_text=, **kwargs):
try:
style = []
for key in (, ):
if key in kwargs:
if isinstance(kwargs[key], (list, tuple, set)):
style += list(kwargs[key])
else:
style.append(kwargs[key])
if in kwargs:
shape = self._get_shape_style(**kwargs)
if shape:
style.append("shape-outside: url()".format(shape))
attrs = {
: alt_text,
: title,
**self.get_img_attrs(style, **kwargs)
}
return flask.Markup(
self._wrap_link_target(
kwargs,
utils.make_tag(
, attrs, start_end=kwargs.get()),
title))
except FileNotFoundError as error:
text = .format(
html.escape(error.filename))
if in error.filename:
text +=
text +=
return flask.Markup(text) | Build a <img> tag for the image with the specified options.
Returns: an HTML fragment. |
8,717 | def ContainsKey(self, public_key):
return self.ContainsKeyHash(Crypto.ToScriptHash(public_key.encode_point(True), unhex=True)) | Test if the wallet contains the supplied public key.
Args:
public_key (ecdsa.Curve.point): a public key to test for its existence, e.g. KeyPair.PublicKey
Returns:
bool: True if exists, False otherwise. |
8,718 | def isSelectionPositionValid(self, selPos: tuple):
if selPos is None:
return False
if len(selPos) != 4:
return False
check1 = self.isPositionValid(*selPos[:2])
check2 = self.isPositionValid(*selPos[2:])
if check1 and check2:
return True
else:
return False | Return **True** if the start- and end position denote valid
positions within the document.
|Args|
* ``selPos`` (**tuple**): tuple with four integers.
|Returns|
**bool**: **True** if the positions are valid; **False** otherwise.
|Raises|
* **None** |
8,719 | def get_health_events(self, recipient):
if recipient not in self.addresses_events:
self.start_health_check(recipient)
return self.addresses_events[recipient] | Starts a healthcheck task for `recipient` and returns a
HealthEvents with locks to react on its current state. |
8,720 | def cached_download(url, name):
clean_name = os.path.normpath(name)
if clean_name != name:
raise ValueError("{} is not normalized.".format(name))
for dir_ in iter_data_dirs():
path = os.path.join(dir_, name)
if os.path.exists(path):
return path
dir_ = next(iter_data_dirs(True))
path = os.path.join(dir_, name)
log.info("Downloading {} to {}".format(url, path))
response = urlopen(url)
if response.getcode() != 200:
raise ValueError("HTTP {}".format(response.getcode()))
dir_ = os.path.dirname(path)
try:
os.makedirs(dir_)
except OSError as e:
if e.errno != errno.EEXIST:
raise
tmp_path = path + '.tmp'
with open(tmp_path, 'wb') as fh:
while True:
chunk = response.read(8196)
if chunk:
fh.write(chunk)
else:
break
os.rename(tmp_path, path)
return path | Download the data at a URL, and cache it under the given name.
The file is stored under `pyav/test` with the given name in the directory
:envvar:`PYAV_TESTDATA_DIR`, or the first that is writeable of:
- the current virtualenv
- ``/usr/local/share``
- ``/usr/local/lib``
- ``/usr/share``
- ``/usr/lib``
- the user's home |
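An illustrative call; the URL and cache name are placeholders, and it assumes `cached_download` (and the `iter_data_dirs` helper it uses) from the listing above are in scope. Repeated calls return the already-downloaded path without hitting the network.

```python
# Placeholder URL/name; second call hits the cache and skips the download.
path = cached_download(
    'https://example.com/samples/clip.mp4',   # hypothetical asset URL
    'samples/clip.mp4',                        # cache-relative, already-normalized name
)
same_path = cached_download('https://example.com/samples/clip.mp4', 'samples/clip.mp4')
assert path == same_path
```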
8,721 | def _write_error_batch_wait(future, batch, database, measurement,
measurements):
if not future.done():
ioloop.IOLoop.current().add_timeout(
ioloop.IOLoop.current().time() + 0.025,
_write_error_batch_wait, future, batch, database, measurement,
measurements)
return
error = future.exception()
if isinstance(error, httpclient.HTTPError):
if error.code == 400:
LOGGER.error(
, database, batch, error.code,
error.response.body)
LOGGER.info(,
database, batch, measurement)
else:
LOGGER.error(
,
database, batch, error.code)
measurements = measurements + [measurement]
elif isinstance(error, (TimeoutError, OSError, socket.error,
select.error, ssl.socket_error)):
LOGGER.error(
, database, batch, error)
_write_error_batch(batch, database, measurements + [measurement])
measurements = measurements + [measurement]
if not measurements:
LOGGER.info(,
database, batch)
return
_write_error_batch(batch, database, measurements) | Invoked by the IOLoop, this method checks if the HTTP request future
created by :meth:`_write_error_batch` is done. If it's done it will
evaluate the result, logging any error and moving on to the next
measurement. If there are no measurements left in the `measurements`
argument, it will consider the batch complete.
:param tornado.concurrent.Future future: The AsyncHTTPClient request future
:param str batch: The batch ID
:param str database: The database name for the measurements
:param str measurement: The measurement the future is for
:param list measurements: The measurements that failed to write as a batch |
8,722 | def _activity_import_doc(self, time_doc, activities):
batch_updates = [time_doc]
td_start = time_doc[]
activities = filter(lambda act: (act[0] < td_start and
act[1] in time_doc), activities)
creation_field = self.lconfig.get()
activities.sort(reverse=True, key=lambda o: o[0])
new_doc = {}
for when, field, removed, added in activities:
last_doc = batch_updates.pop()
if last_doc[] == when:
new_doc = deepcopy(last_doc)
last_doc = batch_updates.pop()
else:
new_doc = deepcopy(last_doc)
new_doc[] = when
new_doc[] = when
last_doc[] = when
last_val = last_doc[field]
new_val, inconsistent = self._activity_backwards(new_doc[field],
removed, added)
new_doc[field] = new_val
if inconsistent:
self._log_inconsistency(last_doc, last_val, field,
removed, added, when)
new_doc[] = {} if not new_doc.get() else new_doc[]
return batch_updates | Import activities for a single document into timeline. |
8,723 | def byteorder_isnative(byteorder):
if byteorder in ('=', sys.byteorder):
return True
keys = {'big': '>', 'little': '<'}
return keys.get(byteorder, byteorder) == keys[sys.byteorder] | Return if byteorder matches the system's byteorder.
>>> byteorder_isnative('=')
True |
8,724 | def calculate_leaf_paths(self):
reverse_xref = {}
leaves = set()
for v in self.value.values():
if v.leaf:
leaves.add(v)
for xref in v.value_xref:
reverse_xref.setdefault(xref, []).append(v.ident)
for leaf in leaves:
self.calculate_leaf_path(leaf, reverse_xref) | Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves. |
8,725 | def update_text(self):
self.write()
try:
newtext = self.text_queue.get_nowait()
self._text = newtext
except Empty:
pass | Write the current text, and check for any new text changes.
This also updates the elapsed time. |
8,726 | def gcm_send_message(registration_id, data, encoding='utf-8', **kwargs):
messenger = GCMMessenger(registration_id, data, encoding=encoding, **kwargs)
return messenger.send_plain() | Standalone method to send a single gcm notification |
8,727 | def consume(self, data):
if not self._started:
self.fire(JSONStreamer.DOC_START_EVENT)
self._started = True
self._file_like.write(data)
try:
self._parser.parse(self._file_like)
except YajlError as ye:
raise JSONStreamerException(ye.value) | Takes input that must be parsed
Note:
Attach all your listeners before calling this method
Args:
data (str): input json string |
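A hedged usage sketch assuming the jsonstreamer package's `JSONStreamer`, its `add_catch_all_listener()` hook and `close()`; it shows why listeners must be attached before `consume()` and that input may arrive in arbitrary chunks.

```python
# Assumes the jsonstreamer package's catch-all listener API; chunk boundaries
# are arbitrary, to show the push parser handling partial input.
from jsonstreamer import JSONStreamer

def listener(event_name, *args):
    print(event_name, args)

streamer = JSONStreamer()
streamer.add_catch_all_listener(listener)   # attach listeners *before* consume()
streamer.consume('{"temperature": 21.3, "readings": [1, 2')
streamer.consume(', 3]}')
streamer.close()
```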
8,728 | def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
job.fileStore.logToMaster( % (univ_options[], chrom))
work_dir = job.fileStore.getLocalTempDir()
input_files = {
: bams[],
: bams[],
: bams[],
: bams[],
: bams[],
: bams[],
: radia_file,
: radia_options[],
: radia_options[]
}
input_files = get_files_from_filestore(job, input_files, work_dir,
docker=True)
filterradia_output = .join([, chrom, ])
filterradia_log = .join([work_dir, , chrom,
])
parameters = [univ_options[],
chrom.lstrip(),
input_files[],
,
,
, ,
, ,
, ,
, ,
, ,
, ,
,
, ,
,
,
, input_files[],
,
, docker_path(filterradia_log)]
docker_call(tool=, tool_parameters=parameters,
work_dir=work_dir, dockerhub=univ_options[])
output_files = defaultdict()
output_files[filterradia_output] = \
job.fileStore.writeGlobalFile(.join([work_dir, ,
univ_options[], ,
chrom, ]))
output_files[os.path.basename(filterradia_log)] = \
job.fileStore.writeGlobalFile(filterradia_log)
return output_files | This module will run filterradia on the RNA and DNA bams.
ARGUMENTS
1. bams: REFER ARGUMENTS of run_radia()
2. univ_options: REFER ARGUMENTS of run_radia()
3. radia_file: <JSid of vcf generated by run_radia()>
3. radia_options: REFER ARGUMENTS of run_radia()
4. chrom: REFER ARGUMENTS of run_radia()
RETURN VALUES
1. Dict of filtered radia output vcf and logfile
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid> |
8,729 | def find_all(self, predicate):
for _nid, entry in self._registry.items():
if predicate(entry):
yield entry | Returns a generator that produces a sequence of Entry objects for which the predicate returned True.
Args:
predicate: A callable that returns a value coercible to bool. |
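A self-contained mini-registry showing the predicate-driven lookup pattern `find_all` implements; the `Entry` fields and registry wiring below are invented for the example.

```python
from dataclasses import dataclass
from typing import Callable, Dict, Iterator

@dataclass
class Entry:
    name: str
    size: int

class Registry:
    """Toy registry mirroring the _registry/find_all shape above."""
    def __init__(self) -> None:
        self._registry: Dict[int, Entry] = {}

    def add(self, nid: int, entry: Entry) -> None:
        self._registry[nid] = entry

    def find_all(self, predicate: Callable[[Entry], bool]) -> Iterator[Entry]:
        for _nid, entry in self._registry.items():
            if predicate(entry):
                yield entry

reg = Registry()
reg.add(1, Entry("alpha", 10))
reg.add(2, Entry("beta", 500))
print([e.name for e in reg.find_all(lambda e: e.size > 100)])  # ['beta']
```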
8,730 | def master_event(type, master=None):
event_map = {: ,
: ,
: ,
: }
if type == and master is not None:
return .format(event_map.get(type), master)
return event_map.get(type, None) | Centralized master event function which will return event type based on event_map |
8,731 | def remove_optional(annotation: type):
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', ())
if origin == Union and len(args) == 2 and args[1] == type(None):
return args[0]
else:
return annotation | Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away. |
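A quick, self-contained check of the unwrapping behaviour (restating the function so the snippet runs on its own); it relies only on how `typing` exposes `__origin__`/`__args__` on Python 3.7+.

```python
from typing import Optional, Union

def remove_optional(annotation: type):
    origin = getattr(annotation, '__origin__', None)
    args = getattr(annotation, '__args__', ())
    if origin == Union and len(args) == 2 and args[1] == type(None):
        return args[0]
    return annotation

print(remove_optional(Optional[int]))    # <class 'int'>
print(remove_optional(Union[int, str]))  # typing.Union[int, str] (unchanged)
print(remove_optional(int))              # <class 'int'> (unchanged)
```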
8,732 | def node_to_text(self, node, prev_node_hint=None):
if node is None:
return ""
if node.isNodeType(latexwalker.LatexCharsNode):
if not self.strict_latex_spaces[] and len(node.chars.strip()) == 0:
return ""
return node.chars
if node.isNodeType(latexwalker.LatexCommentNode):
if self.keep_comments:
if self.strict_latex_spaces[]:
return + node.comment +
else:
return + node.comment + node.comment_post_space
else:
if self.strict_latex_spaces[]:
return ""
else:
return node.comment_post_space
if node.isNodeType(latexwalker.LatexGroupNode):
contents = self._groupnodecontents_to_text(node)
if self.keep_braced_groups and len(contents) >= self.keep_braced_groups_minlen:
return "{" + contents + "}"
return contents
def apply_simplify_repl(node, simplify_repl, nodelistargs, what):
if callable(simplify_repl):
if 'l2tobj' in getfullargspec(simplify_repl)[0]:
return simplify_repl(node, l2tobj=self)
return simplify_repl(node)
if '%s' in simplify_repl:
try:
return simplify_repl % tuple([self._groupnodecontents_to_text(nn)
for nn in nodelistargs])
except (TypeError, ValueError):
logger.warning(
"WARNING: Error in configuration: {} failed its substitution!".format(what)
)
return simplify_repl
return simplify_repl
if node.isNodeType(latexwalker.LatexMacroNode):
macroname = node.macroname.rstrip()
if macroname in self.macro_dict:
mac = self.macro_dict[macroname]
else:
mac = self.macro_dict[]
def get_macro_str_repl(node, macroname, mac):
if mac.simplify_repl:
return apply_simplify_repl(node, mac.simplify_repl, node.nodeargs,
what="macro %s"%(macroname))
if mac.discard:
return ""
a = node.nodeargs
if (node.nodeoptarg):
a.prepend(node.nodeoptarg)
return "".join([self._groupnodecontents_to_text(n) for n in a])
macrostr = get_macro_str_repl(node, macroname, mac)
return macrostr
if node.isNodeType(latexwalker.LatexEnvironmentNode):
envname = node.envname.rstrip()
if (envname in self.env_dict):
envdef = self.env_dict[envname]
else:
envdef = self.env_dict[]
if envdef.simplify_repl:
return apply_simplify_repl(node, envdef.simplify_repl, node.nodelist,
what="environment %s"%(envname))
if envdef.discard:
return ""
return self._nodelistcontents_to_text(node.nodelist)
if node.isNodeType(latexwalker.LatexMathNode):
if self.keep_inline_math:
return latexwalker.math_node_to_latex(node)
else:
with _PushEquationContext(self):
return self._nodelistcontents_to_text(node.nodelist)
logger.warning("LatexNodes2Text.node_to_text(): Unknown node: %r", node)
return "" | Return the textual representation of the given `node`.
If `prev_node_hint` is specified, then the current node is formatted
suitably as following the node given in `prev_node_hint`. This might
affect how much space we keep/discard, etc. |
8,733 | def show_quickref(self):
from IPython.core.usage import quick_reference
self.main.help.show_plain_text(quick_reference) | Show IPython Cheat Sheet |
8,734 | def run_opt(self, popsize, numgen, processors,
plot=False, log=False, **kwargs):
self._params['popsize'] = popsize
self._params['numgen'] = numgen
self._params['processors'] = processors
self._params['plot'] = plot
self._params['log'] = log
self._params.update(**kwargs)
self.halloffame = tools.HallOfFame(1)
self.stats = tools.Statistics(lambda thing: thing.fitness.values)
self.stats.register("avg", numpy.mean)
self.stats.register("std", numpy.std)
self.stats.register("min", numpy.min)
self.stats.register("max", numpy.max)
self.logbook = tools.Logbook()
self.logbook.header = ["gen", "evals"] + self.stats.fields
self._params['model_count'] = 0
start_time = datetime.datetime.now()
self.initialize_pop()
for g in range(self._params['numgen']):
self.update_pop()
self.halloffame.update(self.population)
self.logbook.record(gen=g, evals=self._params['model_count'],
**self.stats.compile(self.population))
print(self.logbook.stream)
end_time = datetime.datetime.now()
time_taken = end_time - start_time
self._params['time_taken'] = time_taken
print("Evaluated {0} models in total".format(
self._params['model_count']))
print("Best fitness is {0}".format(self.halloffame[0].fitness))
print("Best parameters are {0}".format(self.parse_individual(
self.halloffame[0])))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
print(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
print(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
if self._params['log']:
self.log_results()
if self._params['plot']:
print()
plt.figure(figsize=(5, 5))
plt.plot(range(len(self.logbook.select())),
self.logbook.select())
plt.xlabel(, fontsize=20)
plt.ylabel(, fontsize=20) | Runs the optimizer.
:param popsize:
:param numgen:
:param processors:
:param plot:
:param log:
:param kwargs:
:return: |
8,735 | def parse_csv(self, infile, delimiter=",", decimal_sep="."):
"Parse template format csv file and create elements dict"
keys = (,,,,,,,,
,,,,,
,,, )
self.elements = []
self.pg_no = 0
if not PY3K:
f = open(infile, 'rb')
else:
f = open(infile)
for row in csv.reader(f, delimiter=delimiter):
kargs = {}
for i,v in enumerate(row):
if not v.startswith("name'].lower() for v in self.elements] | Parse template format csv file and create elements dict |
8,736 | def merge(revision, branch_label, message, list_revisions=):
alembic_command.merge(
config=get_config(),
revisions=list_revisions,
message=message,
branch_label=branch_label,
rev_id=revision
) | Merge two revision together, create new revision file |
8,737 | def get_containers(self):
images = {}
for x in self._docker.images.list(filters={"label": "org.inginious.grading.name"}):
try:
title = x.labels["org.inginious.grading.name"]
created = datetime.strptime(x.attrs['Created'][:-4], "%Y-%m-%dT%H:%M:%S.%f").timestamp()
ports = [int(y) for y in x.labels["org.inginious.grading.ports"].split(
",")] if "org.inginious.grading.ports" in x.labels else []
images[x.attrs['Id']] = {"title": title, "created": created, "ports": ports}
except:
logging.getLogger("inginious.agent").exception("Container %s is badly formatted", title)
latest = {}
for img_id, img_c in images.items():
if img_c["title"] not in latest or latest[img_c["title"]]["created"] < img_c["created"]:
latest[img_c["title"]] = {"id": img_id, "created": img_c["created"], "ports": img_c["ports"]}
return latest | :return: a dict of available containers in the form
{
"name": { #for example, "default"
"id": "container img id", # "sha256:715c5cb5575cdb2641956e42af4a53e69edf763ce701006b2c6e0f4f39b68dd3"
"created": 12345678 # create date
"ports": [22, 434] # list of ports needed
}
} |
8,738 | def _verify_docker_image_size(self, image_name):
shell_call(['docker', 'pull', image_name])
try:
image_size = subprocess.check_output(
[, , , image_name]).strip()
image_size = int(image_size)
except (ValueError, subprocess.CalledProcessError) as e:
logging.error(, e)
return False
logging.info(, image_name, image_size)
if image_size > MAX_DOCKER_IMAGE_SIZE:
logging.error(, MAX_DOCKER_IMAGE_SIZE)
return image_size <= MAX_DOCKER_IMAGE_SIZE | Verifies size of Docker image.
Args:
image_name: name of the Docker image.
Returns:
True if image size is within the limits, False otherwise. |
8,739 | def default(self, statement: Statement) -> Optional[bool]:
if self.default_to_shell:
if 'shell' not in self.exclude_from_history:
self.history.append(statement)
return self.do_shell(statement.command_and_args)
else:
err_msg = self.default_error.format(statement.command)
self.decolorized_write(sys.stderr, "{}\n".format(err_msg)) | Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input |
8,740 | def get_pr_review_status(pr: PullRequestDetails) -> Any:
url = ("https://api.github.com/repos/{}/{}/pulls/{}/reviews"
"?access_token={}".format(pr.repo.organization,
pr.repo.name,
pr.pull_id,
pr.repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode()) | References:
https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request |
8,741 | def submit_combine(basename, readers, job_ids=None, project_name=None):
sub = PmidSubmitter(basename, readers, project_name)
sub.job_list = job_ids
sub.submit_combine()
return sub | Submit a batch job to combine the outputs of a reading job.
This function is provided for backwards compatibility. You should use the
PmidSubmitter and submit_combine methods. |
8,742 | def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
) | `patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases. |
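Two minimal demonstrations of the decorator and context-manager forms described above, using the stdlib `unittest.mock` equivalent and patching a real stdlib attribute so the snippet runs as-is.

```python
from unittest import mock
import os

# Context-manager form: os.getcwd is replaced only inside the with block.
with mock.patch('os.getcwd', return_value='/tmp/fake'):
    assert os.getcwd() == '/tmp/fake'
# On exit the patch is undone and os.getcwd() is the real function again.

# Decorator form: the created MagicMock is passed in as an extra argument.
@mock.patch('os.getcwd')
def report_cwd(mock_getcwd):
    mock_getcwd.return_value = '/tmp/fake'
    return os.getcwd()

assert report_cwd() == '/tmp/fake'
```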
8,743 | def invert_affine_mat44(mat):
inverted = Matrix44()
for i in range(3):
for j in range(3):
inverted.data[i][j] = mat.data[j][i]
for row in range(3):
inverted.data[3][row] = (
-inverted.data[0][row] * mat.data[3][0] +
-inverted.data[1][row] * mat.data[3][1] +
-inverted.data[2][row] * mat.data[3][2])
return inverted | Assumes the matrix contains only rotation, translation,
and uniform scale components. |
8,744 | def getReadLengths(reads, gapChars):
gapChars = set(gapChars)
result = {}
for read in reads:
result[read.id] = len(read) - sum(
character in gapChars for character in read.sequence)
return result | Get all read lengths, excluding gap characters.
@param reads: A C{Reads} instance.
@param gapChars: A C{str} of sequence characters considered to be gaps.
@return: A C{dict} keyed by read id, with C{int} length values. |
8,745 | def codegen(lang,
i,
schema_metadata,
loader
):
j = schema.extend_and_specialize(i, loader)
gen = None
if lang == "python":
gen = PythonCodeGen(sys.stdout)
elif lang == "java":
gen = JavaCodeGen(schema_metadata.get("$base", schema_metadata.get("id")))
else:
raise Exception("Unsupported code generation language " % lang)
assert gen is not None
gen.prologue()
document_roots = []
for rec in j:
if rec["type"] in ("enum", "record"):
gen.type_loader(rec)
gen.add_vocab(shortname(rec["name"]), rec["name"])
for rec in j:
if rec["type"] == "enum":
for symbol in rec["symbols"]:
gen.add_vocab(shortname(symbol), symbol)
if rec["type"] == "record":
if rec.get("documentRoot"):
document_roots.append(rec["name"])
field_names = []
for field in rec.get("fields", []):
field_names.append(shortname(field["name"]))
idfield = ""
for field in rec.get("fields", []):
if field.get("jsonldPredicate") == "@id":
idfield = field.get("name")
gen.begin_class(rec["name"], aslist(rec.get("extends", [])), rec.get("doc", ""),
rec.get("abstract", False), field_names, idfield)
gen.add_vocab(shortname(rec["name"]), rec["name"])
for field in rec.get("fields", []):
if field.get("jsonldPredicate") == "@id":
fieldpred = field["name"]
optional = bool("https://w3id.org/cwl/salad
uri_loader = gen.uri_loader(gen.type_loader(field["type"]), True, False, None)
gen.declare_id_field(fieldpred, uri_loader, field.get("doc"), optional)
break
for field in rec.get("fields", []):
optional = bool("https://w3id.org/cwl/salad
type_loader = gen.type_loader(field["type"])
jld = field.get("jsonldPredicate")
fieldpred = field["name"]
if isinstance(jld, MutableMapping):
ref_scope = jld.get("refScope")
if jld.get("typeDSL"):
type_loader = gen.typedsl_loader(type_loader, ref_scope)
elif jld.get("_type") == "@id":
type_loader = gen.uri_loader(type_loader, jld.get("identity", False),
False, ref_scope)
elif jld.get("_type") == "@vocab":
type_loader = gen.uri_loader(type_loader, False, True, ref_scope)
map_subject = jld.get("mapSubject")
if map_subject:
type_loader = gen.idmap_loader(
field["name"], type_loader, map_subject, jld.get("mapPredicate"))
if "_id" in jld and jld["_id"][0] != "@":
fieldpred = jld["_id"]
if jld == "@id":
continue
gen.declare_field(fieldpred, type_loader, field.get("doc"), optional)
gen.end_class(rec["name"], field_names)
root_type = list(document_roots)
root_type.append({
"type": "array",
"items": document_roots
})
gen.epilogue(gen.type_loader(root_type)) | Generate classes with loaders for the given Schema Salad description. |
8,746 | def _split_generators(self, dl_manager):
url = _DL_URLS[self.builder_config.name]
data_dirs = dl_manager.download_and_extract(url)
path_to_dataset = os.path.join(data_dirs, tf.io.gfile.listdir(data_dirs)[0])
train_a_path = os.path.join(path_to_dataset, "trainA")
train_b_path = os.path.join(path_to_dataset, "trainB")
test_a_path = os.path.join(path_to_dataset, "testA")
test_b_path = os.path.join(path_to_dataset, "testB")
return [
tfds.core.SplitGenerator(
name="trainA",
num_shards=10,
gen_kwargs={
"path": train_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="trainB",
num_shards=10,
gen_kwargs={
"path": train_b_path,
"label": "B",
}),
tfds.core.SplitGenerator(
name="testA",
num_shards=1,
gen_kwargs={
"path": test_a_path,
"label": "A",
}),
tfds.core.SplitGenerator(
name="testB",
num_shards=1,
gen_kwargs={
"path": test_b_path,
"label": "B",
}),
] | Returns SplitGenerators. |
8,747 | def process_bool_arg(arg):
if isinstance(arg, bool):
return arg
elif isinstance(arg, basestring):
if arg.lower() in ["true", "1"]:
return True
elif arg.lower() in ["false", "0"]:
return False | Determine True/False from argument |
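A Python 3 restatement with a few behaviour checks (the original tests against Python 2's `basestring`); note that unrecognized strings and non-bool/non-string inputs fall through and return `None`.

```python
def process_bool_arg(arg):
    """Determine True/False from argument (Python 3 spelling of the above)."""
    if isinstance(arg, bool):
        return arg
    if isinstance(arg, str):
        if arg.lower() in ("true", "1"):
            return True
        if arg.lower() in ("false", "0"):
            return False
    return None  # unrecognized input falls through

assert process_bool_arg(True) is True
assert process_bool_arg("TRUE") is True
assert process_bool_arg("0") is False
assert process_bool_arg("maybe") is None
```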
8,748 | def get_var_dict_from_ctx(ctx: commands.Context, prefix: str = '_'):
raw_var_dict = {
'author': ctx.author,
'bot': ctx.bot,
'channel': ctx.channel,
'ctx': ctx,
'find': discord.utils.find,
'get': discord.utils.get,
'guild': ctx.guild,
'message': ctx.message,
'msg': ctx.message
}
return {f'{prefix}{k}': v for k, v in raw_var_dict.items()} | Returns the dict to be used in REPL for a given Context. |
8,749 | def exportPreflibFile(self, fileName):
elecType = self.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "soi" and elecType != "toi":
print("ERROR: printing current type to preflib format is not supported")
exit()
reverseRankMaps = self.getReverseRankMaps()
outfileObj = open(fileName, 'w')
outfileObj.write(str(self.numCands))
for candInt, cand in self.candMap.items():
outfileObj.write("\n" + str(candInt) + "," + cand)
preferenceCount = 0
for preference in self.preferences:
preferenceCount += preference.count
outfileObj.write("\n" + str(self.numVoters) + "," + str(preferenceCount) + "," + str(len(self.preferences)))
for i in range(0, len(reverseRankMaps)):
outfileObj.write("\n" + str(self.preferences[i].count))
reverseRankMap = reverseRankMaps[i]
sortedKeys = sorted(reverseRankMap.keys())
for key in sortedKeys:
cands = reverseRankMap[key]
if len(cands) == 1:
outfileObj.write("," + str(cands[0]))
elif len(cands) > 1:
outfileObj.write(",{" + str(cands[0]))
for j in range(1, len(cands)):
outfileObj.write("," + str(cands[j]))
outfileObj.write("}")
outfileObj.close() | Exports a preflib format file that contains all the information of the current Profile.
:ivar str fileName: The name of the output file to be exported. |
8,750 | def get_spot_value(self, assets, field, dt, data_frequency):
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected value of type {}."
.format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
if assets_is_scalar:
return self._get_single_asset_value(
session_label,
assets,
field,
dt,
data_frequency,
)
else:
get_single_asset_value = self._get_single_asset_value
return [
get_single_asset_value(
session_label,
asset,
field,
dt,
data_frequency,
)
for asset in assets
] | Public API method that returns a scalar value representing the value
of the desired asset's field at either the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset`` The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be a int. If the ``field`` is
'last_traded' the value will be a Timestamp. |
8,751 | def new_table_graphicFrame(cls, id_, name, rows, cols, x, y, cx, cy):
graphicFrame = cls.new_graphicFrame(id_, name, x, y, cx, cy)
graphicFrame.graphic.graphicData.uri = GRAPHIC_DATA_URI_TABLE
graphicFrame.graphic.graphicData.append(
CT_Table.new_tbl(rows, cols, cx, cy)
)
return graphicFrame | Return a ``<p:graphicFrame>`` element tree populated with a table
element. |
8,752 | def reply_topic(self, topic_id, content):
data = {
: content,
: self._request.cookies.get()
}
url = % topic_id
r = self.request(url, , data=data)
j = r.json()
if j[] == 0:
return j[][][] | Post a reply to a group topic.
:return: the new comment id, or ``None`` |
8,753 | def get_host_health_temperature_sensors(self, data=None):
data = self.get_host_health_data(data)
d = data[][][]
if not isinstance(d, list):
d = [d]
return d | Get the health Temp Sensor report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the temperature sensors
information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO. |
8,754 | def register_actions(self, shortcut_manager):
assert isinstance(shortcut_manager, ShortcutManager)
self.__shortcut_manager = shortcut_manager
for controller in list(self.__child_controllers.values()):
if controller not in self.__action_registered_controllers:
try:
controller.register_actions(shortcut_manager)
except Exception as e:
logger.error("Error while registering action for {0}: {1}".format(controller.__class__.__name__, e))
self.__action_registered_controllers.append(controller) | Register callback methods for triggered actions in all child controllers.
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
between shortcuts and actions. |
8,755 | def nmap_discover():
rs = RangeSearch()
rs_parser = rs.argparser
arg = argparse.ArgumentParser(parents=[rs_parser], conflict_handler=)
arg.add_argument(, metavar=, \
help=, \
type=str, choices=[, ])
arguments, nmap_args = arg.parse_known_args()
tag = None
if arguments.type == :
tag =
nmap_args.append()
nmap_args.append()
check_function = include_up_hosts
elif arguments.type == :
tag =
nmap_args.append()
check_function = include_hostnames
ranges = rs.get_ranges(tags=[.format(tag)])
ranges = [r for r in ranges]
ips = []
for r in ranges:
ips.append(r.range)
print_notification("Running nmap with args: {} on {} range(s)".format(nmap_args, len(ips)))
result = nmap(nmap_args, ips)
stats = import_nmap(result, tag, check_function)
stats[] = len(ips)
Logger().log(, "Nmap discover with args: {} on {} range(s)".format(nmap_args, len(ips)), stats)
for r in ranges:
r.add_tag(tag)
r.save() | This function retrieves ranges from jackal
Uses two functions of nmap to find hosts:
ping: icmp / arp pinging of targets
lookup: reverse dns lookup |
8,756 | def _get_labels_right(self, validate=None):
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_right)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels) | Get all labels of the right dataframe. |
8,757 | def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
assert wait_for_completion is True
storage_group_oid = uri_parms[0]
storage_group_uri = + storage_group_oid
try:
storage_group = hmc.lookup_by_uri(storage_group_uri)
except KeyError:
raise InvalidResourceError(method, uri)
check_required_fields(method, uri, body, [])
candidate_adapter_port_uris = \
storage_group.properties['candidate-adapter-port-uris']
for ap_uri in body[]:
if ap_uri in candidate_adapter_port_uris:
raise ConflictError(method, uri, 483,
"Adapter port is already in candidate "
"list of storage group %s: %s" %
(storage_group.name, ap_uri))
else:
candidate_adapter_port_uris.append(ap_uri) | Operation: Add Candidate Adapter Ports to an FCP Storage Group. |
8,758 | def visit_exec(self, node, parent):
newnode = nodes.Exec(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.body, newnode),
_visit_or_none(node, "globals", self, newnode),
_visit_or_none(node, "locals", self, newnode),
)
return newnode | visit an Exec node by returning a fresh instance of it |
8,759 | def is_confusable(string, greedy=False, preferred_aliases=[]):
preferred_aliases = [a.upper() for a in preferred_aliases]
outputs = []
checked = set()
for char in string:
if char in checked:
continue
checked.add(char)
char_alias = alias(char)
if char_alias in preferred_aliases:
}
if not greedy:
return [output]
outputs.append(output)
return outputs or False | Checks if ``string`` contains characters which might be confusable with
characters from ``preferred_aliases``.
If ``greedy=False``, it will only return the first confusable character
found without looking at the rest of the string, ``greedy=True`` returns
all of them.
``preferred_aliases=[]`` can take an array of unicode block aliases to
be considered as your 'base' unicode blocks:
- considering ``paρa``,
- with ``preferred_aliases=['latin']``, the 3rd character ``ρ``
would be returned because this greek letter can be confused with
latin ``p``.
- with ``preferred_aliases=['greek']``, the 1st character ``p``
would be returned because this latin letter can be confused with
greek ``ρ``.
- with ``preferred_aliases=[]`` and ``greedy=True``, you'll discover
the 29 characters that can be confused with ``p``, the 23
characters that look like ``a``, and the one that looks like ``ρ``
(which is, of course, *p* aka *LATIN SMALL LETTER P*).
>>> confusables.is_confusable('paρa', preferred_aliases=['latin'])[0]['character']
'ρ'
>>> confusables.is_confusable('paρa', preferred_aliases=['greek'])[0]['character']
'p'
>>> confusables.is_confusable('Abç', preferred_aliases=['latin'])
False
>>> confusables.is_confusable('AlloΓ', preferred_aliases=['latin'])
False
>>> confusables.is_confusable('ρττ', preferred_aliases=['greek'])
False
>>> confusables.is_confusable('ρτ.τ', preferred_aliases=['greek', 'common'])
False
>>> confusables.is_confusable('ρττp')
[{'homoglyphs': [{'c': 'p', 'n': 'LATIN SMALL LETTER P'}], 'alias': 'GREEK', 'character': 'ρ'}]
:param string: A unicode string
:type string: str
:param greedy: Don't stop on finding one confusable character - find all of them.
:type greedy: bool
:param preferred_aliases: Script blocks aliases which we don't want ``string``'s characters
to be confused with.
:type preferred_aliases: list(str)
:return: False if not confusable, all confusable characters and with what they are confusable
otherwise.
:rtype: bool or list |
8,760 | def step(self, **args):
if self.sequenceType == None:
raise AttributeError()
if in args:
if args[]:
self.setContext()
del args[]
elif self.initContext:
self.setContext()
if self.initContext == 0:
for context in list(self.contextLayers.values()):
context.activationSet = 1
for key in args:
args[key] = self.replacePatterns( args[key], key )
inputBankNames = [layer.name for layer in self.layers if layer.kind == 'Input']
outputBankNames = [layer.name for layer in self.layers if layer.kind == 'Output']
inputBankSizes = [layer.size for layer in self.layers if layer.kind == 'Input']
inputBankTotalSize = sum(inputBankSizes)
inputArgSizes = [len(args[name]) for name in inputBankNames if name in args]
inputArgTotalSize = sum(inputArgSizes)
sequenceLength = inputArgTotalSize // inputBankTotalSize
learning = self.learning
totalRetvals = (0.0, 0, 0)
totalPCorrect = {}
for step in range(sequenceLength):
if self.verbosity >= 1 or self.interactive:
print("-----------------------------------Step
dict = {}
dict.update(args)
for name in inputBankNames:
if name in args:
patternLength = self[name].size
offset = step * patternLength
if (offset + patternLength) >= len(args[name]):
dict[name] = args[name][-patternLength:]
else:
dict[name] = args[name][offset:offset+patternLength]
for name in outputBankNames:
if name in args:
patternLength = self[name].size
offset = step * patternLength
if (offset + patternLength) >= len(args[name]):
dict[name] = args[name][-patternLength:]
else:
dict[name] = args[name][offset:offset+patternLength]
for p in self.prediction:
(inName, outName) = p
inLayer = self.getLayer(inName)
if not inLayer.type == :
raise LayerError(Input\, inLayer.type)
outLayer = self.getLayer(outName)
if not outLayer.type == :
raise LayerError(Output\, outLayer.type)
if step == sequenceLength - 1:
start = 0
if not self._sweeping:
raise LayerError("Attempting to predict last item in sequence, but using step(). Use sweep() instead.")
else:
if self.currentSweepCount == None:
pattern = self.getData(self.loadOrder[0])
for key in pattern:
pattern[key] = self.replacePatterns( pattern[key], key )
if inName in inputBankNames:
if inName in pattern:
dict[outName] = pattern[inName][start:start+patternLength]
else:
pattern = self.getData(self.loadOrder[self.currentSweepCount+1])
for key in pattern:
pattern[key] = self.replacePatterns( pattern[key], key )
if inName in inputBankNames:
if inName in pattern:
dict[outName] = pattern[inName][start:start+patternLength]
else:
start = (step + 1) * inLayer.size
dict[outName] = args[inName][start:start+patternLength]
if step < sequenceLength - 1:
if not self.learnDuringSequence:
self.learning = 0
retvals = self.networkStep(**dict)
self.learning = learning
totalRetvals = list(map(lambda x,y: x+y, totalRetvals[:3], retvals[:3]))
sumMerge(totalPCorrect, retvals[3])
totalRetvals.append( totalPCorrect)
return totalRetvals | SRN.step()
Extends network step method by automatically copying hidden
layer activations to the context layer. |
8,761 | def find(entity, **kwargs):
try:
typedfields = entity.typed_fields()
except AttributeError:
typedfields = iterfields(entity.__class__)
matching = [x for x in typedfields if _matches(x, kwargs)]
return matching | Return all TypedFields found on the input `Entity` that were initialized
with the input **kwargs.
Example:
>>> find(myentity, multiple=True, type_=Foo)
Note:
TypedFields.__init__() can accept a string or a class as a type_
argument, but this method expects a class.
Args:
**kwargs: TypedField __init__ **kwargs to search on.
Returns:
A list of TypedFields with matching **kwarg values. |
8,762 | def arraymax(X,Y):
Z = np.zeros((len(X),), int)
A = X <= Y
B = Y < X
Z[A] = Y[A]
Z[B] = X[B]
return Z | Fast "vectorized" max function for element-wise comparison of two numpy arrays.
For two numpy arrays `X` and `Y` of equal length,
return numpy array `Z` such that::
Z[i] = max(X[i],Y[i])
**Parameters**
**X** : numpy array
Numpy array; `len(X) = len(Y)`.
**Y** : numpy array
Numpy array; `len(Y) = len(X)`.
**Returns**
**Z** : numpy array
Numpy array such that `Z[i] = max(X[i],Y[i])`.
**See Also**
:func:`tabular.fast.arraymin` |
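For reference, NumPy's built-in `np.maximum` computes the same element-wise result as the masked assignments above; a tiny check:

```python
import numpy as np

X = np.array([1, 5, 3, 7])
Y = np.array([4, 2, 3, 9])

# Element-wise maximum, identical to what the masked Z[A]/Z[B] assignments build.
assert np.array_equal(np.maximum(X, Y), np.array([4, 5, 3, 9]))
```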
8,763 | def server_extensions_handshake(requested, supported):
accepts = {}
for offer in requested:
name = offer.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
accept = extension.accept(offer)
if accept is True:
accepts[extension.name] = True
elif accept is not False and accept is not None:
accepts[extension.name] = accept.encode("ascii")
if accepts:
extensions = []
for name, params in accepts.items():
if params is True:
extensions.append(name.encode("ascii"))
else:
params = params.decode("ascii")
if params == "":
extensions.append(("%s" % (name)).encode("ascii"))
else:
extensions.append(("%s; %s" % (name, params)).encode("ascii"))
return b", ".join(extensions)
return None | Agree on the extensions to use returning an appropriate header value.
This returns None if there are no agreed extensions |
8,764 | def rpc(ctx, call, arguments, api):
try:
data = list(eval(d) for d in arguments)
except:
data = arguments
ret = getattr(ctx.bitshares.rpc, call)(*data, api=api)
print_dict(ret) | Construct RPC call directly
\b
You can specify which API to send the call to:
uptick rpc --api assets
You can also specify lists using
uptick rpc get_objects "['2.0.0', '2.1.0']" |
8,765 | def shell_context_processor(self, func: Callable) -> Callable:
self.shell_context_processors.append(func)
return func | Add a shell context processor.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.shell_context_processor
def additional_context():
return context |
8,766 | def exception(self, message, *args, **kwargs):
self.logger.exception(message, *args, **kwargs) | Handle exception |
8,767 | def analysis_download(self, webid, type, run=None, file=None):
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
if file is None:
return (filename, _file.getvalue())
else:
return filename | Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f) |
8,768 | def limit(self, limit):
if limit is None:
raise ValueError("Invalid value for `limit`, must not be `None`")
if limit > 200:
raise ValueError("Invalid value for `limit`, must be a value less than or equal to `200`")
if limit < 1:
raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`")
self._limit = limit | Sets the limit of this ListEmployeeWagesRequest.
Maximum number of Employee Wages to return per page. Can range between 1 and 200. The default is the maximum at 200.
:param limit: The limit of this ListEmployeeWagesRequest.
:type: int |
8,769 | def _go_to_line(editor, line):
b = editor.application.current_buffer
b.cursor_position = b.document.translate_row_col_to_index(max(0, int(line) - 1), 0) | Move cursor to this line in the current buffer. |
8,770 | def get_json(self, path: str, params: Dict[str, Any], host: str = 'www.instagram.com',
session: Optional[requests.Session] = None, _attempt=1) -> Dict[str, Any]:
is_graphql_query = 'query_hash' in params and 'graphql/query' in path
sess = session if session else self._session
try:
self.do_sleep()
if is_graphql_query:
self._ratecontrol_graphql_query(params['query_hash'])
resp = sess.get('https://{}/{}'.format(host, path), params=params, allow_redirects=False)
while resp.is_redirect:
redirect_url = resp.headers['location']
self.log(.format(host, path, redirect_url))
if redirect_url.startswith('https://{}/'.format(host)):
resp = sess.get(redirect_url if redirect_url.endswith('/') else redirect_url + '/',
params=params, allow_redirects=False)
else:
break
if resp.status_code == 400:
raise QueryReturnedBadRequestException("400 Bad Request")
if resp.status_code == 404:
raise QueryReturnedNotFoundException("404 Not Found")
if resp.status_code == 429:
raise TooManyRequestsException("429 Too Many Requests")
if resp.status_code != 200:
raise ConnectionException("HTTP error code {}.".format(resp.status_code))
is_html_query = not is_graphql_query and not "__a" in params and host == "www.instagram.com"
if is_html_query:
            match = re.search(r'window\._sharedData = (.*);</script>', resp.text)
if match is None:
raise ConnectionException("Could not find \"window._sharedData\" in html response.")
return json.loads(match.group(1))
else:
resp_json = resp.json()
        if 'status' in resp_json and resp_json['status'] != "ok":
            if 'message' in resp_json:
                raise ConnectionException("Returned \"{}\" status, message \"{}\".".format(resp_json['status'],
                                                                                           resp_json['message']))
            else:
                raise ConnectionException("Returned \"{}\" status.".format(resp_json['status']))
return resp_json
except (ConnectionException, json.decoder.JSONDecodeError, requests.exceptions.RequestException) as err:
error_string = "JSON Query to {}: {}".format(path, err)
if _attempt == self.max_connection_attempts:
raise ConnectionException(error_string) from err
self.error(error_string + " [retrying; skip with ^C]", repeat_at_end=False)
try:
if isinstance(err, TooManyRequestsException):
                self._ratecontrol_graphql_query(params['query_hash'], untracked_queries=True)
return self.get_json(path=path, params=params, host=host, session=sess, _attempt=_attempt + 1)
except KeyboardInterrupt:
self.error("[skipped by user]", repeat_at_end=False)
raise ConnectionException(error_string) from err | JSON request to Instagram.
:param path: URL, relative to the given domain which defaults to www.instagram.com/
:param params: GET parameters
:param host: Domain part of the URL from where to download the requested JSON; defaults to www.instagram.com
:param session: Session to use, or None to use self.session
:return: Decoded response dictionary
:raises QueryReturnedBadRequestException: When the server responds with a 400.
:raises QueryReturnedNotFoundException: When the server responds with a 404.
:raises ConnectionException: When query repeatedly failed. |
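The retry branch above re-invokes get_json with an incremented _attempt counter until max_connection_attempts is reached. Below is a minimal standalone sketch of the same retry-on-failure pattern with plain requests; MAX_ATTEMPTS and fetch_json are hypothetical names, and the backoff policy is an assumption, not Instaloader's.

import time
import requests

MAX_ATTEMPTS = 3  # hypothetical cap, mirrors the role of self.max_connection_attempts

def fetch_json(url, params=None, _attempt=1):
    """GET a JSON document, retrying on connection-level failures."""
    try:
        resp = requests.get(url, params=params, timeout=10)
        resp.raise_for_status()
        return resp.json()
    except (requests.exceptions.RequestException, ValueError) as err:
        if _attempt >= MAX_ATTEMPTS:
            raise ConnectionError("JSON query to {} failed: {}".format(url, err)) from err
        time.sleep(2 ** _attempt)  # simple exponential backoff between attempts
        return fetch_json(url, params=params, _attempt=_attempt + 1)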
8,771 | def match_hostname(cert, hostname):
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
host_ip = ipaddress.ip_address(six.text_type(hostname))
except ValueError:
host_ip = None
dnsnames = []
    san = cert.get('subjectAltName', ())
for key, value in san:
        if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
        elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
        for sub in cert.get('subject', ()):
for key, value in sub:
                if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames)))
        )
    elif len(dnsnames) == 1:
        raise CertificateError(
            "hostname %r doesn't match %r"
            % (hostname, dnsnames[0])
        )
    else:
        raise CertificateError(
            "no appropriate commonName or "
            "subjectAltName fields were found"
) | Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing. |
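A hedged usage sketch for the function above; the certificate dict is made up for illustration and shaped like the output of SSLSocket.getpeercert().

# Hypothetical decoded certificate in getpeercert() form.
cert = {
    'subject': ((('commonName', 'example.com'),),),
    'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.com')),
}

match_hostname(cert, 'www.example.com')   # returns None: the wildcard SAN matches
try:
    match_hostname(cert, 'evil.test')
except CertificateError as err:
    print(err)                            # hostname 'evil.test' doesn't match ...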
8,772 | def update_metric_by_name(self, metric_name, metric_type, description=None,
custom_properties=None, tags=None, **kwargs):
    data = {'type': metric_type.upper(),
            'description': description or '',
            'customProperties': custom_properties or {},
            'tags': tags or []}
resp = self._put(self._u(self._METRIC_ENDPOINT_SUFFIX,
str(metric_name)),
data=data, **kwargs)
resp.raise_for_status()
return resp.json() | Create or update a metric object
Args:
metric_name (string): name of metric
metric_type (string): metric type, must be one of 'gauge', 'counter',
'cumulative_counter'
description (optional[string]): a description
custom_properties (optional[dict]): dictionary of custom properties
tags (optional[list of strings]): list of tags associated with
metric |
8,773 | def write_state_file(self):
fh = open(, )
state = {}
    state['vpcID'] = self.vpc_id
    state['sgID'] = self.sg_id
    state['snIDs'] = self.sn_ids
    state['instances'] = self.instances
state["instanceState"] = self.instance_states
fh.write(json.dumps(state, indent=4)) | Save information that must persist to a file.
We do not want to create a new VPC and new identical security groups, so we save
information about them in a file between runs. |
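A minimal read-back sketch for the state this method persists; the file name state_file.json is a hypothetical stand-in, since the original literal did not survive extraction.

import json

def read_state_file(path="state_file.json"):
    """Load previously saved VPC/security-group/instance state, if present."""
    try:
        with open(path) as fh:
            return json.load(fh)
    except FileNotFoundError:
        return None  # first run: nothing persisted yet

state = read_state_file()
if state:
    print(state.get("instanceState"))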
8,774 | def add_node(self, node, weight=1):
self._nodes.add(node)
self._weights[node] = weight
self._hashring = dict()
self._sorted_keys = []
    self._build_circle() | Adds node to the circle and rebuilds it. |
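_build_circle is not shown in this entry; the sketch below shows what a weighted hash-ring rebuild typically looks like. MD5 placement and 100 * weight virtual nodes per real node are arbitrary choices, not the library's implementation.

import bisect
import hashlib

def build_circle(nodes, weights):
    """Place weight-proportional virtual nodes on a 0..2**128 hash ring."""
    hashring = {}
    sorted_keys = []
    for node in nodes:
        for i in range(100 * weights.get(node, 1)):
            key = int(hashlib.md5("{}-{}".format(node, i).encode()).hexdigest(), 16)
            hashring[key] = node
            sorted_keys.append(key)
    sorted_keys.sort()
    return hashring, sorted_keys

def get_node(hashring, sorted_keys, item):
    """Map an item to the first virtual node clockwise from its hash."""
    key = int(hashlib.md5(item.encode()).hexdigest(), 16)
    idx = bisect.bisect(sorted_keys, key) % len(sorted_keys)
    return hashring[sorted_keys[idx]]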
8,775 | def _append_instruction(self, obj, qargs=None):
if isinstance(obj, Instruction):
chan = None
        if obj.name == 'reset':
            chan = SuperOp(
                np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0],
                          [0, 0, 0, 0]]))
        if obj.name == 'kraus':
            kraus = obj.params
            dim = len(kraus[0])
            chan = SuperOp(_to_superop('Kraus', (kraus, None), dim, dim))
        elif hasattr(obj, 'to_matrix'):
            try:
                kraus = [obj.to_matrix()]
                dim = len(kraus[0])
                chan = SuperOp(
                    _to_superop('Kraus', (kraus, None), dim, dim))
except QiskitError:
pass
if chan is not None:
op = self.compose(chan, qargs=qargs)
self._data = op.data
else:
            | Update the current Operator by applying an instruction. |
8,776 | def respond_to_SIGHUP(signal_number, frame, logger=None):
global restart
restart = True
if logger:
        logger.info('detected SIGHUP')
raise KeyboardInterrupt | raise the KeyboardInterrupt which will cause the app to effectively
shutdown, closing all its resources. Then, because it sets 'restart' to
True, the app will reread all the configuration information, rebuild all
of its structures and resources and start running again |
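A hedged sketch of how such a handler is typically registered with the standard signal module on Unix; the functools.partial is only there to bind the logger argument.

import functools
import logging
import signal

logger = logging.getLogger(__name__)

# The OS supplies the (signal_number, frame) arguments when SIGHUP arrives.
signal.signal(signal.SIGHUP, functools.partial(respond_to_SIGHUP, logger=logger))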
8,777 | def calc_asymptotic_covariance(hessian, fisher_info_matrix):
hess_inv = scipy.linalg.inv(hessian)
return np.dot(hess_inv, np.dot(fisher_info_matrix, hess_inv)) | Parameters
----------
hessian : 2D ndarray.
It should have shape `(num_vars, num_vars)`. It is the matrix of second
derivatives of the total loss across the dataset, with respect to each
pair of coefficients being estimated.
fisher_info_matrix : 2D ndarray.
It should have a shape of `(num_vars, num_vars)`. It is the
approximation of the negative of the expected hessian formed by taking
the outer product of (each observation's gradient of the loss function)
with itself, and then summing across all observations.
Returns
-------
huber_white_matrix : 2D ndarray.
Will have shape `(num_vars, num_vars)`. The entries in the returned
matrix are calculated by the following formula:
`hess_inverse * fisher_info_matrix * hess_inverse`. |
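A small self-contained check of the sandwich formula with made-up 2x2 matrices; calc_asymptotic_covariance above computes the same quantity.

import numpy as np
import scipy.linalg

# Hypothetical 2x2 inputs: a hessian and an outer-product (BHHH) approximation.
hessian = np.array([[4.0, 1.0],
                    [1.0, 3.0]])
fisher_info_matrix = np.array([[2.0, 0.5],
                               [0.5, 1.5]])

hess_inv = scipy.linalg.inv(hessian)
huber_white = hess_inv @ fisher_info_matrix @ hess_inv
print(huber_white)  # symmetric 2x2 Huber-White covariance estimate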
8,778 | def _comparison_functions(cls, partial=False):
def prerelease_cmp(a, b):
if a and b:
return identifier_list_cmp(a, b)
elif a:
return -1
elif b:
return 1
else:
return 0
def build_cmp(a, b):
if a == b:
return 0
else:
return NotImplemented
def make_optional(orig_cmp_fun):
@functools.wraps(orig_cmp_fun)
def alt_cmp_fun(a, b):
if a is None or b is None:
return 0
return orig_cmp_fun(a, b)
return alt_cmp_fun
if partial:
return [
base_cmp,
make_optional(base_cmp),
make_optional(base_cmp),
make_optional(prerelease_cmp),
make_optional(build_cmp),
]
else:
return [
base_cmp,
base_cmp,
base_cmp,
prerelease_cmp,
build_cmp,
] | Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions. |
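The ordering encoded by prerelease_cmp follows SemVer: a version with a prerelease sorts before the same version without one, and identifiers are compared element-wise. The sketch below is a simplified stand-in for identifier_list_cmp, not the library's implementation.

def identifier_cmp(a, b):
    """Compare two prerelease identifiers: numeric sorts before alphanumeric."""
    if a.isdigit() and b.isdigit():
        return (int(a) > int(b)) - (int(a) < int(b))
    if a.isdigit():
        return -1
    if b.isdigit():
        return 1
    return (a > b) - (a < b)

def identifier_list_cmp(a, b):
    """Element-wise comparison; on a tie, the shorter list sorts first."""
    for ida, idb in zip(a, b):
        res = identifier_cmp(ida, idb)
        if res != 0:
            return res
    return (len(a) > len(b)) - (len(a) < len(b))

assert identifier_list_cmp(['alpha', '1'], ['alpha', '2']) == -1
assert identifier_list_cmp(['alpha'], ['alpha', '1']) == -1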
8,779 | def follow(user, obj):
follow, created = Follow.objects.get_or_create(user, obj)
return follow | Make a user follow an object |
8,780 | def has_plugin(self, name=None, plugin_type=None):
if name is None and plugin_type is None:
return len(self.plugins) > 0
return name in self.plugins | Check if the manager has a plugin / plugin(s), either by its name, type, or simply checking if the
manager has any plugins registered in it.
Utilizing the name argument will check if a plugin with that name exists in the manager.
Using both the name and plugin_type arguments will check if a plugin with that name, and type, exists.
Using only the plugin_type argument will check if any plugins matching the type specified are registered
in the plugin manager.
:param name: Name of the plugin to check for.
:param plugin_type: Plugin Type to check for.
:return: |
8,781 | def predict(self, X):
check_rdd(X, (np.ndarray, sp.spmatrix))
    if hasattr(self, '_mllib_model'):
if isinstance(X, ArrayRDD):
X = X.unblock()
return X.map(lambda x: self._mllib_model.predict(x))
else:
rdd = X.map(lambda X: super(SparkKMeans, self).predict(X))
return ArrayRDD(rdd) | Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : ArrayRDD containing array-like, sparse matrix
New data to predict.
Returns
-------
labels : ArrayRDD with predictions
Index of the cluster each sample belongs to. |
8,782 | def initialize(self):
if not in self._vim.vars:
self._vim.vars[] =
self._vim.command()
| Sets up initial ensime-vim editor settings. |
8,783 | def _get_cache(self):
if not self._cache:
self._cache = get_cache(self.app)
return self._cache | Return the cache to use for thundering herd protection, etc. |
8,784 | def resize(self, size=None):
if not self.operation.israw():
return
size = size or tty.size(self.operation.stdout)
if size is not None:
rows, cols = size
try:
self.operation.resize(height=rows, width=cols)
except IOError:
pass | Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY. |
8,785 | def _db_upgrade(self, db_name):
current_db_version = self._get_db_version()
self._execute(db_schema.functions)
for i in range(current_db_version, nipap.__db_version__):
        self._logger.info("Upgrading DB schema: %s to %s", i, i + 1)
upgrade_sql = db_schema.upgrade[i-1]
self._execute(upgrade_sql % (db_name))
self._execute(db_schema.triggers) | Upgrade nipap database schema |
8,786 | def transformFromNative(obj):
obj.isNative = False
obj.value = serializeFields(obj.value, NAME_ORDER)
return obj | Replace the Name in obj.value with a string. |
8,787 | def pivot_pandas_to_excel(soup, show_intermediate_breakdown=False, show_total_breakdown=False):
tables = soup.findAll()
for table in tables:
table.thead.findChildren()[1].decompose()
new_body = Tag(name=)
bc = 0
num_columns_max = max(len(row.findAll()) for row in table.tbody.findAll())
num_headers_max = max(len(row.findAll()) for row in table.tbody.findAll())
last = False
for row in table.tbody.findChildren():
headers = list(row.findChildren())
data = list(row.findChildren())
if len(headers) > 1:
if in headers[0].contents:
last = True
indent = 0
first_header = (len(headers) == num_headers_max)
if not last:
for header in headers[:-1]:
new_row = Tag(name=, attrs={: }) if first_header else Tag(name=, attrs={: })
if not header.contents:
continue
new_header = Tag(name=, attrs={: ,
: + str(10*(num_headers_max - len(headers) + indent)) + }) if first_header else \
Tag(name=, attrs={: ,
: + str(10*(num_headers_max - len(headers) + indent)) + })
new_header.contents = header.contents
new_row.insert(0, new_header)
for j in range(num_columns_max-1):
new_row.insert(j+1, Tag(name=, attrs={: })) if first_header else new_row.insert(j+1, Tag(name=, attrs={: }))
new_body.insert(bc, new_row)
bc += 1
indent += 1
first_header = False
new_row = Tag(name=)
new_header = Tag(name=, attrs={: ,
: + str(10*(num_headers_max-1)) + })
new_header.contents = headers[-1].contents
if in headers[-1].contents:
if not show_intermediate_breakdown and not last:
continue
elif not show_intermediate_breakdown:
last = False
new_row = Tag(name=, attrs={: })
new_header = Tag(name=, attrs={: })
new_header.contents = [NavigableString()]
elif not last:
pass
if last:
continue
new_row.insert(0, new_header)
cc = 1
for _ in range(num_columns_max - len(data) - 1):
new_header = Tag(name=, attrs={: }) if in headers[-1].contents else Tag(name=)
new_row.insert(cc, new_header)
cc += 1
for dat in data:
new_data = Tag(name=) if not in headers[-1].contents else Tag(name=, attrs={: })
new_data.contents = dat.contents
new_row.insert(cc, new_data)
cc += 1
new_body.insert(bc, new_row)
bc += 1
table.tbody.replaceWith(new_body)
return soup | pandas style pivot to excel style pivot formatting for outlook/html
This function is meant to be provided to the email functionality as a postprocessor.
It expects a jupyter or pandas exported html table of a dataframe with the following index:
example:
# a single pivot
pt1 = pd.pivot_table(data,
value=['col1', 'col2', 'col3'],
index=['index'],
columns=['col4'],
aggfunc='sum',
margins=True).stack('col4')
# here we reindex the table to have the appropriate row ordering
pt1 = pt1.reindex(
pd.MultiIndex(
levels=[['COL1', 'COL2', 'All'],
['ROW1', 'ROW2', 'ALL']],
labels=[[0, 0, 0, 1, 1, 1, 2], # This is the key, changing the label order (2-ALL)
[2, 0, 1, 2, 0, 1, 2]],
names=['', ''],
sortorder=0)
).fillna(0)
show_intermediate_breakdown --> intermediate summations to be shown?
show_total_breakdown --> total summations to be shown? |
8,788 | def load_plugins(self, plugin_path):
self.logger.debug(.format(plugin_path))
plugins = {}
plugin_dir = os.path.realpath(plugin_path)
sys.path.append(plugin_dir)
for f in os.listdir(plugin_dir):
if f.endswith(".py"):
name = f[:-3]
elif f.endswith(".pyc"):
name = f[:-4]
else:
continue
try:
self.logger.debug(.format(f))
mod = __import__(name, globals(), locals(), [], 0)
for plugin_class in inspect.getmembers(mod):
                if plugin_class[0][0:2] == '__':
continue
                if hasattr(plugin_class[1], 'config_name'):
if plugin_class[1].config_name is not None:
plugins[plugin_class[1].config_name] = plugin_class[1]
self.logger.debug(.format(plugin_class[1].config_name))
except ImportError as e:
self.logger.error(e)
pass
self.logger.debug()
return plugins | Loads plugins from modules in plugin_path. Looks for the config_name property
in each object that's found. If so, adds that to the dictionary with the
config_name as the key. config_name should be unique between different plugins.
:param plugin_path: Path to load plugins from
:return: dictionary of plugins by config_name |
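A hedged, importlib-based sketch of the same discovery idea: scan a directory, import each module, and keep classes that expose a config_name attribute. The names here are illustrative, not the original implementation.

import importlib.util
import inspect
import os

def discover_plugins(plugin_dir):
    """Return {config_name: class} for every plugin class found in plugin_dir."""
    plugins = {}
    for fname in os.listdir(plugin_dir):
        if not fname.endswith(".py"):
            continue
        spec = importlib.util.spec_from_file_location(fname[:-3], os.path.join(plugin_dir, fname))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        for _, cls in inspect.getmembers(module, inspect.isclass):
            name = getattr(cls, "config_name", None)
            if name:
                plugins[name] = cls
    return plugins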
8,789 | def execute_code_block(elem, doc):
command = select_executor(elem, doc).split()
code = elem.text
if in elem.attributes or in elem.classes:
code = save_plot(code, elem)
command.append(code)
    if 'args' in elem.attributes:
        for arg in elem.attributes['args'].split():
            command.append(arg)
    cwd = elem.attributes['cwd'] if 'cwd' in elem.attributes else None
    return subprocess.run(command,
                          encoding='utf-8',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd).stdout | Executes a code block by passing it to the executor.
Args:
elem The AST element.
doc The document.
Returns:
The output of the command. |
8,790 | async def subscribe(
schema: GraphQLSchema,
document: DocumentNode,
root_value: Any = None,
context_value: Any = None,
variable_values: Dict[str, Any] = None,
operation_name: str = None,
field_resolver: GraphQLFieldResolver = None,
subscribe_field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterator[ExecutionResult], ExecutionResult]:
try:
result_or_stream = await create_source_event_stream(
schema,
document,
root_value,
context_value,
variable_values,
operation_name,
subscribe_field_resolver,
)
except GraphQLError as error:
return ExecutionResult(data=None, errors=[error])
if isinstance(result_or_stream, ExecutionResult):
return result_or_stream
result_or_stream = cast(AsyncIterable, result_or_stream)
async def map_source_to_response(payload):
result = execute(
schema,
document,
payload,
context_value,
variable_values,
operation_name,
field_resolver,
)
return await result if isawaitable(result) else result
return MapAsyncIterator(result_or_stream, map_source_to_response) | Create a GraphQL subscription.
Implements the "Subscribe" algorithm described in the GraphQL spec.
Returns a coroutine object which yields either an AsyncIterator (if successful) or
an ExecutionResult (client error). The coroutine will raise an exception if a server
error occurs.
If the client-provided arguments to this function do not result in a compliant
subscription, a GraphQL Response (ExecutionResult) with descriptive errors and no
data will be returned.
If the source stream could not be created due to faulty subscription resolver logic
or underlying systems, the coroutine object will yield a single ExecutionResult
containing `errors` and no `data`.
If the operation succeeded, the coroutine will yield an AsyncIterator, which yields
a stream of ExecutionResults representing the response stream. |
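A hedged usage sketch for the coroutine above, assuming a schema whose Subscription type exposes a greetings field backed by an async generator; the field name and the schema variable are illustrative.

import asyncio
from graphql import ExecutionResult, parse

async def consume():
    document = parse("subscription { greetings }")
    result = await subscribe(schema, document)   # `schema` built elsewhere
    if isinstance(result, ExecutionResult):
        raise RuntimeError(result.errors)        # client error: no stream was created
    async for payload in result:                 # stream of ExecutionResult objects
        print(payload.data)

asyncio.run(consume())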
8,791 | def set_password(username,
password,
encrypted=False,
role=None,
crypt_salt=None,
algorithm=,
**kwargs):
username {0} password 5 {1} role {0}'.format(role)
return config(password_line, **kwargs) | Set users password on switch.
username
Username to configure
password
Password to configure for username
encrypted
Whether or not to encrypt the password
Default: False
role
Configure role for the username
Default: None
crypt_salt
Configure crypt_salt setting
Default: None
algorithm
Encryption algorithm
Default: sha256
no_save_config
If True, don't save configuration commands to startup configuration.
If False, save configuration to startup configuration.
Default: False
.. code-block:: bash
salt '*' nxos.cmd set_password admin TestPass
salt '*' nxos.cmd set_password admin \\
password='$5$2fWwO2vK$s7.Hr3YltMNHuhywQQ3nfOd.gAPHgs3SOBYYdGT3E.A' \\
encrypted=True |
8,792 | def sort_projects(
self,
workflowTags):
self.refresh
if not isinstance(workflowTags, list):
workflowTagsLists = workflowTags.strip().replace(",", "").replace("@", "")
workflowTagsLists = workflowTagsLists.split(" ")
else:
workflowTagsLists = []
workflowTagsLists[:] = [l.replace("@", "") for l in workflowTags]
matchedProjects = collections.OrderedDict()
unmatchedProjects = []
for wt in workflowTagsLists:
matchedProjects[wt.lower()] = []
for p in self.projects:
matched = False
for pt in p.tags:
if matched:
break
for wt in workflowTagsLists:
thisTag = pt.lower()
if "(" not in wt:
thisTag = pt.split("(")[0].lower()
if thisTag == wt.lower() and matched == False:
matchedProjects[wt.lower()].append(p)
matched = True
break
if matched == False:
unmatchedProjects.append(p)
sortedProjects = []
for k, v in matchedProjects.iteritems():
sortedProjects += v
sortedProjects += unmatchedProjects
self.projects = sortedProjects
self.content = self.to_string(
title=False, projects=sortedProjects, indentLevel=0)
for p in self.projects:
p.projects = p.sort_projects(workflowTags)
oldContent = self.to_string(indentLevel=1)
newContent = self.to_string(
indentLevel=1, projects=sortedProjects)
if self.parent:
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.content = self.content.replace(self.to_string(indentLevel=0, title=False), self.to_string(
indentLevel=0, title=False, projects=sortedProjects))
self.refresh
return sortedProjects | *order the projects within this taskpaper object via a list of tags*
The order of the tags in the list dictates the order of the sort - first comes first*
**Key Arguments:**
- ``workflowTags`` -- a string of space/comma separated tags.
**Return:**
- ``None``
**Usage:**
To recursively sort the projects within a taskpaper document with the following order:
1. *@due*
2. *@flag*
3. *@hold*
4. *@next*
5. *@someday*
6. *@wait*
use the following:
.. code-block:: python
doc.sort_projects("@due, @flag, @hold, @next, @someday, @wait") |
8,793 | def iter_subclasses(class_):
ensure_class(class_)
classes = set()
def descend(class_):
subclasses = set(class_.__subclasses__()) - classes
classes.update(subclasses)
return subclasses
result = breadth_first(class_, descend)
next(result)
return result | Iterate over all the subclasses (and subclasses thereof, etc.)
of given class.
:param class_: Class to yield the subclasses of
:return: Iterable of subclasses, sub-subclasses, etc. of ``class_`` |
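A small usage sketch with throwaway classes; direct subclasses are yielded before their own subclasses because the traversal is breadth-first.

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

print(list(iter_subclasses(Base)))  # [<class '__main__.Child'>, <class '__main__.GrandChild'>]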
8,794 | def get_update_status_brok(self):
    data = {'uuid': self.uuid}
    self.fill_data_brok_from(data, 'full_status')
    return Brok({'type': 'update_' + self.my_type + '_status', 'data': data}) | Create an update item brok
:return: Brok object
:rtype: alignak.Brok |
8,795 | def knx_to_date(knxdata):
if len(knxdata) != 3:
raise KNXException("Can only convert a 3 Byte object to date")
year = knxdata[2]
if year >= 90:
year += 1900
else:
year += 2000
return date(year, knxdata[1], knxdata[0]) | Convert a 3 byte KNX data object to a date |
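A quick usage sketch showing the day/month/year byte order and the two-digit-year pivot at 90.

print(knx_to_date([31, 12, 99]))  # 1999-12-31  (years >= 90 map to 19xx)
print(knx_to_date([1, 6, 23]))    # 2023-06-01  (years < 90 map to 20xx)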
8,796 | def bowtie_alignment_plot (self):
keys = OrderedDict()
keys[] = { : , : }
keys[] = { : , : }
keys[] = { : , : }
config = {
: ,
: ,
: ,
:
}
self.add_section(
description = ,
helptext = ,
plot = bargraph.plot(self.bowtie_data, keys, config)
) | Make the HighCharts HTML to plot the alignment rates |
8,797 | def _get_path(self, file):
dir = self._cache_directory()
if not os.path.exists(dir):
os.makedirs(dir)
return os.path.join(dir, file) | Creates the cache directory if it doesn't already exist. Returns the
full path to the specified file inside the cache directory. |
8,798 | def decompressBWT(inputDir, outputDir, numProcs, logger):
msbwt = MultiStringBWT.CompressedMSBWT()
msbwt.loadMsbwt(inputDir, logger)
outputFile = np.lib.format.open_memmap(outputDir+, , , (msbwt.getTotalSize(),))
del outputFile
worksize = 1000000
tups = [None]*(msbwt.getTotalSize()/worksize+1)
x = 0
if msbwt.getTotalSize() > worksize:
for x in xrange(0, msbwt.getTotalSize()/worksize):
tups[x] = (inputDir, outputDir, x*worksize, (x+1)*worksize)
tups[-1] = (inputDir, outputDir, (x+1)*worksize, msbwt.getTotalSize())
else:
tups[0] = (inputDir, outputDir, 0, msbwt.getTotalSize())
if numProcs > 1:
myPool = multiprocessing.Pool(numProcs)
rets = myPool.map(decompressBWTPoolProcess, tups)
else:
rets = []
for tup in tups:
rets.append(decompressBWTPoolProcess(tup)) | This is called for taking a BWT and decompressing it back out to it's original form. While unusual to do,
it's included in this package for completion purposes.
@param inputDir - the directory of the compressed BWT we plan on decompressing
@param outputFN - the directory for the output decompressed BWT, it can be the same, we don't care
@param numProcs - number of processes we're allowed to use
@param logger - log all the things! |
8,799 | def set_scan_option(self, scan_id, name, value):
return self.scan_collection.set_option(scan_id, name, value) | Sets a scan's option to a provided value. |