Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
6,600 | def get_mark_css(aes_name, css_value):
css_prop = AES_CSS_MAP[aes_name]
if isinstance(css_value, list):
return get_mark_css_for_rules(aes_name, css_prop, css_value)
else:
return get_mark_simple_css(aes_name, css_prop, css_value) | Generate CSS class for <mark> tag.
Parameters
----------
aes_name: str
The name of the class.
css_value: str
The value for the CSS property defined by aes_name.
Returns
-------
list of str
The CSS codeblocks |
6,601 | def pack(self, value=None):
if isinstance(value, type(self)):
return value.pack()
if self.pcp is None and self.cfi is None and self.vid is None:
return b''
self.pcp = self.pcp if self.pcp is not None else 0
self.cfi = self.cfi if self.cfi is not None else 0
self.vid = self.vid if self.vid is not None else 0
self._tci = self.pcp << 13 | self.cfi << 12 | self.vid
return super().pack() | Pack the struct in a binary representation.
Merge some fields to ensure correct packing.
If no arguments are set for a particular instance, it is interpreted as
absence of VLAN information, and the pack() method will return an
empty binary string.
Returns:
bytes: Binary representation of this instance. |
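For reference, the TCI packing in pack() above is plain bit arithmetic; e.g. with pcp=5, cfi=0, vid=100 (values chosen for illustration):
tci = 5 << 13 | 0 << 12 | 100  # -> 41060 == 0xa064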
6,602 | def qos_map_cos_traffic_class_cos3(self, **kwargs):
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
map = ET.SubElement(qos, "map")
cos_traffic_class = ET.SubElement(map, "cos-traffic-class")
name_key = ET.SubElement(cos_traffic_class, "name")
name_key.text = kwargs.pop('name')
cos3 = ET.SubElement(cos_traffic_class, "cos3")
cos3.text = kwargs.pop('cos3')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
6,603 | def _deserialize_primitive(data, klass):
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value | Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool |
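The fallbacks above mean the helper degrades gracefully; a quick sketch:
_deserialize_primitive('42', int)      # -> 42
_deserialize_primitive(3.9, int)       # -> 3
_deserialize_primitive({'a': 1}, int)  # TypeError is caught -> returns {'a': 1} unchanged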
6,604 | def embed(self, name, data=None):
if data is None:
with open(name, 'rb') as fp:
data = fp.read()
name = os.path.basename(name)
elif isinstance(data, bytes):
pass
elif hasattr(data, 'read'):
data = data.read()
else:
raise TypeError("Unable to read image contents")
subtype = imghdr.what(None, data)
self.attach(name, data, 'image', subtype, True) | Attach an image file and prepare for HTML embedding.
This method should only be used to embed images.
:param name: Path to the image to embed if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the image to embed, or None if the data is to
be read from the file pointed to by the ``name`` argument |
6,605 | def deleteByPk(self, pk):
obj = self.mdl.objects.getOnlyIndexedFields(pk)
if not obj:
return 0
return self.deleteOne(obj) | deleteByPk - Delete object associated with given primary key |
6,606 | def backward(self, speed=1):
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'speed must be 0 or 1 with a non-PWM enable device')
self.enable_device.off()
self.phase_device.on()
self.enable_device.value = speed | Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed). |
6,607 | def design_list(self):
ret = self._http_request(
type=_LCB.LCB_HTTP_TYPE_MANAGEMENT,
path="/pools/default/buckets/{0}/ddocs".format(self._cb.bucket),
method=_LCB.LCB_HTTP_METHOD_GET)
real_rows = {}
for r in ret.value['rows']:
real_rows[r['doc']['meta']['id']] = r['doc']['json']
ret.value.clear()
ret.value.update(real_rows)
return ret | List all design documents for the current bucket.
:return: A :class:`~couchbase.result.HttpResult` containing
a dict, with keys being the ID of the design document.
.. note::
This information is derived using the
``pools/default/buckets/<bucket>/ddocs`` endpoint, but the return
value has been modified to match that of :meth:`design_get`.
.. note::
This function returns both 'production' and 'development' mode
views. These two can be distinguished by the name of the
design document being prefixed with the ``dev_`` identifier.
The keys of the dict in ``value`` will be of the form
``_design/<VIEWNAME>`` where ``VIEWNAME`` may either be e.g.
``foo`` or ``dev_foo`` depending on whether ``foo`` is a
production or development mode view.
::
for name, ddoc in mgr.design_list().value.items():
if name.startswith('_design/dev_'):
print "Development view!"
else:
print "Production view!"
Example::
for name, ddoc in mgr.design_list().value.items():
print 'Design name {0}. Contents {1}'.format(name, ddoc)
.. seealso:: :meth:`design_get` |
6,608 | def BoolEncoder(field_number, is_repeated, is_packed):
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField | Returns an encoder for a boolean field. |
6,609 | def _read_quoted(ctx: ReaderContext) -> llist.List:
start = ctx.reader.advance()
assert start == "'"
next_form = _read_next_consuming_comment(ctx)
return llist.l(_QUOTE, next_form) | Read a quoted form from the input stream. |
6,610 | def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'):
if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)):
assert self.data_files, "Must have at least one datafile in the session"
# NOTE: the SOAP parameter names and the default signing_profile were lost
# in extraction; the values below follow the DigiDocService MobileSign
# specification and are partly assumptions.
response = self.__invoke('MobileSign', {
'SignerIDCode': id_code,
'SignersCountry': country,
'SignerPhoneNo': phone_nr,
'Language': self.parse_language(language),
'Role': SkipValue,
'City': SkipValue,
'StateOrProvince': SkipValue,
'PostalCode': SkipValue,
'CountryName': SkipValue,
'ServiceName': self.service_name,
'AdditionalDataToBeDisplayed': self.mobile_message,
'SigningProfile': signing_profile,
'MessagingMode': 'asynchClientServer',
'AsyncConfiguration': SkipValue,
'ReturnDocInfo': SkipValue,
'ReturnDocData': SkipValue,
})
return response | This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session |
6,611 | def process_input_graph(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
input_graph = args[0]
if isinstance(input_graph, nx.DiGraph):
return func(*args, **kwargs)
else:
nx_graph = dict_to_nx(args[0], oriented=True)
args = [nx_graph] + list(args[1:])
return func(*args, **kwargs)
return wrapped_func | Decorator, ensuring first argument is a networkx graph object.
If the first arg is a dict {node: succs}, a networkx graph equivalent
to the dict will be sent in place of it. |
6,612 | def xcorr_plot(template, image, shift=None, cc=None, cc_vec=None, **kwargs):
import matplotlib.pyplot as plt
if cc is None or shift is None:
if not isinstance(cc_vec, np.ndarray):
print('Given: cc=%s, shift=%s' % (cc, shift))
raise IOError('Must provide either cc_vec, or cc and shift')
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
x = np.arange(len(image))
plt.plot(x, image / abs(image).max(), 'k', lw=1.3, label='image')
x = np.arange(len(template)) + shift
plt.plot(x, template / abs(template).max(), 'r', lw=1.1, label='template')
plt.title('Shift=%s, correlation=%s' % (shift, cc))
fig = plt.gcf()
fig = _finalise_figure(fig=fig, **kwargs)
return fig | Plot a template overlying an image aligned by correlation.
:type template: numpy.ndarray
:param template: Short template image
:type image: numpy.ndarray
:param image: Long master image
:type shift: int
:param shift: Shift to apply to template relative to image, in samples
:type cc: float
:param cc: Cross-correlation at shift
:type cc_vec: numpy.ndarray
:param cc_vec: Cross-correlation vector.
:type save: bool
:param save: Whether to save the plot or not.
:type savefile: str
:param savefile: File name to save to
:returns: :class:`matplotlib.figure.Figure`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.plotting import xcorr_plot
>>> from eqcorrscan.utils.stacking import align_traces
>>> st = read().detrend('simple').filter('bandpass', freqmin=2, freqmax=15)
>>> shifts, ccs = align_traces([st[0], st[1]], 40)
>>> shift = shifts[1] * st[1].stats.sampling_rate
>>> cc = ccs[1]
>>> xcorr_plot(template=st[1].data, image=st[0].data, shift=shift,
... cc=cc) # doctest: +SKIP
.. image:: ../../plots/xcorr_plot.png |
6,613 | def _call_and_format(self, req, props=None):
if not isinstance(req, dict):
return err_response(None, ERR_INVALID_REQ,
"Invalid Request. %s is not an object." % str(req))
reqid = None
if req.has_key("id"):
reqid = req["id"]
if props is None:
props = { }
context = RequestContext(props, req)
if self.filters:
for f in self.filters:
f.pre(context)
if context.error:
return context.error
resp = None
try:
result = self._call(context)
resp = { "jsonrpc": "2.0", "id": reqid, "result": result }
except RpcException, e:
resp = err_response(reqid, e.code, e.msg, e.data)
except:
self.log.exception("Error processing request: %s" % str(req))
resp = err_response(reqid, ERR_UNKNOWN, "Server error. Check logs for details.")
if self.filters:
context.response = resp
for f in self.filters:
f.post(context)
return resp | Invokes a single request against a handler using _call() and traps any errors,
formatting them using _err(). If the request is successful it is wrapped in a
JSON-RPC 2.0 compliant dict with keys: 'jsonrpc', 'id', 'result'.
:Parameters:
req
A single dict representing a single JSON-RPC request
props
Application defined properties to set on RequestContext for use with filters.
For example: authentication headers. Must be a dict. |
6,614 | def create_cmdclass(prerelease_cmd=None, package_data_spec=None,
data_files_spec=None):
wrapped = [prerelease_cmd] if prerelease_cmd else []
if package_data_spec or data_files_spec:
wrapped.append('handle_files')
wrapper = functools.partial(_wrap_command, wrapped)
handle_files = _get_file_handler(package_data_spec, data_files_spec)
if 'bdist_egg' in sys.argv:
egg = wrapper(bdist_egg, strict=True)
else:
egg = bdist_egg_disabled
cmdclass = dict(
build_py=wrapper(build_py, strict=is_repo),
bdist_egg=egg,
sdist=wrapper(sdist, strict=True),
handle_files=handle_files,
)
if bdist_wheel:
cmdclass['bdist_wheel'] = wrapper(bdist_wheel, strict=True)
cmdclass['develop'] = wrapper(develop, strict=True)
return cmdclass | Create a command class with the given optional prerelease class.
Parameters
----------
prerelease_cmd: (name, Command) tuple, optional
The command to run before releasing.
package_data_spec: dict, optional
A dictionary whose keys are the dotted package names and
whose values are a list of glob patterns.
data_files_spec: list, optional
A list of (path, dname, pattern) tuples where the path is the
`data_files` install path, dname is the source directory, and the
pattern is a glob pattern.
Notes
-----
We use specs so that we can find the files *after* the build
command has run.
The package data glob patterns should be relative paths from the package
folder containing the __init__.py file, which is given as the package
name.
e.g. `dict(foo=['./bar/*', './baz/**'])`
The data files directories should be absolute paths or relative paths
from the root directory of the repository. Data files are specified
differently from `package_data` because we need a separate path entry
for each nested folder in `data_files`, and this makes it easier to
parse.
e.g. `('share/foo/bar', 'pkgname/bizz', '*')` |
6,615 | def parse_unit(item, group, slash):
# NOTE: the pattern literals in this function were lost in extraction; the
# regexes below (superscript class, 'cubic'/'square' words) are
# reconstructed assumptions.
surface = item.group(group).replace(' ', '')
power = re.findall(r'\^?[0-9%s]+' % r.SUPERSCRIPTS, surface)
if power:
power = [r.UNI_SUPER[i] if i in r.UNI_SUPER else i for i
in power]
power = ''.join(power)
new_power = (-1 * int(power) if slash else int(power))
surface = re.sub(r'\^?[0-9%s]+' % r.SUPERSCRIPTS, '', surface)
elif re.findall(r'\bcubic\b', surface):
new_power = (-3 if slash else 3)
surface = re.sub(r'\bcubic\b', '', surface).strip()
elif re.findall(r'\bsquare\b', surface):
new_power = (-2 if slash else 2)
surface = re.sub(r'\bsquare\b', '', surface).strip()
else:
new_power = (-1 if slash else 1)
return surface, new_power | Parse surface and power from unit text. |
6,616 | def _build_kernel(self):
kernel = self.build_kernel()
kernel = self.symmetrize_kernel(kernel)
kernel = self.apply_anisotropy(kernel)
if (kernel - kernel.T).max() > 1e-5:
warnings.warn("K should be symmetric", RuntimeWarning)
if np.any(kernel.diagonal() == 0):
warnings.warn("K should have a non-zero diagonal", RuntimeWarning)
return kernel | Private method to build kernel matrix
Runs public method to build kernel matrix and runs
additional checks to ensure that the result is okay
Returns
-------
Kernel matrix, shape=[n_samples, n_samples]
Raises
------
RuntimeWarning : if K is not symmetric |
6,617 | def start(self):
print('Serving on %s:%s' % (self.host, self.port))
self.state = RpcServer._STATE_RUN
while self.state == RpcServer._STATE_RUN:
self.server.handle_request()
self.server.server_close()
self.state = RpcServer._STATE_IDLE | Serving loop |
6,618 | def _compute_rtfilter_map(self):
rtfilter_map = {}
def get_neigh_filter(neigh):
neigh_filter = rtfilter_map.get(neigh)
if neigh_filter is None:
neigh_filter = set()
rtfilter_map[neigh] = neigh_filter
return neigh_filter
if self._common_config.max_path_ext_rtfilter_all:
for rtcdest in self._table_manager.get_rtc_table().values():
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
if neigh is None:
continue
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
for rtcdest in self._table_manager.get_rtc_table().values():
path = rtcdest.best_path
if not path:
continue
neigh = path.source
if neigh and neigh.is_ebgp_peer():
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
if neigh and not neigh.is_ebgp_peer():
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
return rtfilter_map | Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
has advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in. |
6,619 | def start(self, test_connection=True):
self._detect_fork()
super(ForkAwareLockerClient, self).start(test_connection) | Checks for forking and starts/restarts if desired |
6,620 | def set_reverb(self, roomsize=-1.0, damping=-1.0, width=-1.0, level=-1.0):
set = 0
if roomsize >= 0:
set += 0b0001
if damping >= 0:
set += 0b0010
if width >= 0:
set += 0b0100
if level >= 0:
set += 0b1000
return fluid_synth_set_reverb_full(self.synth, set, roomsize, damping, width, level) | roomsize Reverb room size value (0.0-1.2)
damping Reverb damping value (0.0-1.0)
width Reverb width value (0.0-100.0)
level Reverb level value (0.0-1.0) |
6,621 | def handle_notification(self, data):
_LOGGER.debug("Received notification from the device..")
if data[0] == PROP_INFO_RETURN and data[1] == 1:
_LOGGER.debug("Got status: %s" % codecs.encode(data, ))
status = Status.parse(data)
_LOGGER.debug("Parsed status: %s", status)
self._raw_mode = status.mode
self._valve_state = status.valve
self._target_temperature = status.target_temp
if status.mode.BOOST:
self._mode = Mode.Boost
elif status.mode.AWAY:
self._mode = Mode.Away
self._away_end = status.away
elif status.mode.MANUAL:
if status.target_temp == EQ3BT_OFF_TEMP:
self._mode = Mode.Closed
elif status.target_temp == EQ3BT_ON_TEMP:
self._mode = Mode.Open
else:
self._mode = Mode.Manual
else:
self._mode = Mode.Auto
_LOGGER.debug("Valve state: %s", self._valve_state)
_LOGGER.debug("Mode: %s", self.mode_readable)
_LOGGER.debug("Target temp: %s", self._target_temperature)
_LOGGER.debug("Away end: %s", self._away_end)
elif data[0] == PROP_SCHEDULE_RETURN:
parsed = self.parse_schedule(data)
self._schedule[parsed.day] = parsed
else:
_LOGGER.debug("Unknown notification %s (%s)", data[0], codecs.encode(data, )) | Handle Callback from a Bluetooth (GATT) request. |
6,622 | def environment_schedule_unset(self, name):
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("environmentScheduleUnset",
in_p=[name]) | Schedules unsetting (removing) an environment variable when creating
the next guest process. This affects the
:py:func:`IGuestSession.environment_changes` attribute.
in name of type str
Name of the environment variable to unset. This cannot be empty
nor can it contain any equal signs. |
6,623 | def find(cls, name):
if not cls.mapping:
for _, obj in inspect.getmembers(exceptions):
if inspect.isclass(obj):
if issubclass(obj, exceptions.NSQException):
if hasattr(obj, 'name'):
cls.mapping[obj.name] = obj
klass = cls.mapping.get(name)
if klass is None:
raise TypeError('unknown exception: %s' % name)
return klass | Find the exception class by name |
6,624 | def p_expr_GT_expr(p):
p[0] = make_binary(p.lineno(2), 'GT', p[1], p[3], lambda x, y: x > y) | expr : expr GT expr |
6,625 | def Increment(self, delta, fields=None):
if delta < 0:
raise ValueError(
"Counter increment should not be < 0 (received: %d)" % delta)
self._metric_values[_FieldsToKey(fields)] = self.Get(fields=fields) + delta | Increments counter value by a given delta. |
6,626 | def path_in_cache(self, filename, metahash):
cpath = self._genpath(filename, metahash)
if os.path.exists(cpath):
return cpath
else:
raise CacheMiss | Generates the path to a file in the mh cache.
The generated path does not imply the file's existence!
Args:
filename: Filename relative to buildroot
metahash: hash object |
6,627 | def check_attr(node, n):
if len(node.children) > n:
return node.children[n] | Check if ATTR has to be normalized
after this instruction has been translated
to intermediate code. |
6,628 | def escape(msg):
# NOTE: the string literals in this function were lost in extraction; the
# sentinel ('\x00') and the colour-code marker ('c') below are assumptions.
msg = msg.replace(escape_character, '\x00')
for escape_key, irc_char in format_dict.items():
msg = msg.replace(irc_char, escape_character + escape_key)
new_msg = ''
while len(msg):
if msg.startswith(escape_character + 'c'):
new_msg += msg[:2]
msg = msg[2:]
if not len(msg):
new_msg += ''  # original literal lost in extraction
continue
colours, msg = extract_irc_colours(msg)
new_msg += colours
else:
new_msg += msg[0]
msg = msg[1:]
new_msg = new_msg.replace('\x00', escape_character + escape_character)
return new_msg | Takes a raw IRC message and returns a girc-escaped message. |
6,629 | def get_encoding_name(self, encoding):
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ', '.join(self.codepages.keys())))
return encoding | Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437. |
6,630 | def _retrieve_config_xml(config_xml, saltenv):
ret = __salt__['cp.cache_file'](config_xml, saltenv)
if not ret:
raise CommandExecutionError('Failed to retrieve {0}'.format(config_xml))
return ret | Helper to cache the config XML and raise a CommandExecutionError if we fail
to do so. If we successfully cache the file, return the cached path. |
6,631 | def _fill_get_item_cache(self, catalog, key):
lang = self._get_lang()
keylist = self.get_all(catalog)
self.ITEM_CACHE[lang][catalog] = dict([(i['key'], i['value']) for i in keylist])  # field names are assumptions
return self.ITEM_CACHE[lang][catalog].get(key) | get from redis, cache locally then return
:param catalog: catalog name
:param key:
:return: |
6,632 | def merge_dicts(d1, d2, _path=None):
if _path is None:
_path = ()
if isinstance(d1, dict) and isinstance(d2, dict):
for k, v in d2.items():
if isinstance(v, MissingValue) and v.name is None:
v.name = '.'.join(_path + (k,))
if isinstance(v, DeletedValue):
d1.pop(k, None)
elif k not in d1:
if isinstance(v, dict):
d1[k] = merge_dicts({}, v, _path + (k,))
else:
d1[k] = v
else:
if isinstance(d1[k], dict) and isinstance(v, dict):
d1[k] = merge_dicts(d1[k], v, _path + (k,))
elif isinstance(d1[k], list) and isinstance(v, list):
d1[k] += v
elif isinstance(d1[k], MissingValue):
d1[k] = v
elif d1[k] is None:
d1[k] = v
elif type(d1[k]) == type(v):
d1[k] = v
else:
raise TypeError(
'Refusing to replace a %s with a %s' % (type(d1[k]), type(v)))
else:
raise TypeError('Cannot merge a %s with a %s' % (type(d1), type(d2)))
return d1 | Merge dictionary d2 into d1, overriding entries in d1 with values from d2.
d1 is mutated.
_path is for internal, recursive use. |
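A quick sanity check of the merge semantics above (a sketch; the MissingValue/DeletedValue markers are omitted):
d1 = {'a': {'x': 1}, 'b': [1], 'c': None}
d2 = {'a': {'y': 2}, 'b': [2], 'c': 3}
merge_dicts(d1, d2)  # -> {'a': {'x': 1, 'y': 2}, 'b': [1, 2], 'c': 3}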
6,633 | def startswith(haystack, prefix):
if haystack is None:
return None
if sys.version_info[0] < 3:
return haystack.startswith(prefix)
return to_bytes(haystack).startswith(to_bytes(prefix)) | py3 comp startswith
:param haystack:
:param prefix:
:return: |
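Behaviour sketch (assuming to_bytes encodes str to bytes on Python 3):
startswith('hello', 'he')   # -> True
startswith(None, 'he')      # -> None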
6,634 | def remove_nans_1D(*args) -> tuple:
vals = np.isnan(args[0])
for a in args:
vals |= np.isnan(a)
return tuple(np.array(a)[~vals] for a in args) | Remove nans in a set of 1D arrays.
Removes indices in all arrays if any array is nan at that index.
All input arrays must have the same size.
Parameters
----------
args : 1D arrays
Returns
-------
tuple
Tuple of 1D arrays in same order as given, with nan indices removed. |
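For instance:
import numpy as np
x = np.array([1.0, np.nan, 3.0])
y = np.array([4.0, 5.0, np.nan])
remove_nans_1D(x, y)  # -> (array([1.]), array([4.]))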
6,635 | def query_edges_by_pubmed_identifiers(self, pubmed_identifiers: List[str]) -> List[Edge]:
fi = and_(Citation.type == CITATION_TYPE_PUBMED, Citation.reference.in_(pubmed_identifiers))
return self.session.query(Edge).join(Evidence).join(Citation).filter(fi).all() | Get all edges annotated to the documents identified by the given PubMed identifiers. |
6,636 | def ks_synth(freq):
ks_mem = (sum(lz.sinusoid(x * freq) for x in [1, 3, 9]) +
lz.white_noise() + lz.Stream(-1, 1)) / 5
return lz.karplus_strong(freq, memory=ks_mem) | Synthesize the given frequency into a Stream by using a model based on
Karplus-Strong. |
6,637 | def get_author_tags(index_page):
dom = dhtmlparser.parseString(index_page)
authors = [
get_html_authors(dom),
get_dc_authors(dom),
]
return sum(authors, []) | Parse `authors` from HTML ``<meta>`` and dublin core.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects. |
6,638 | def transform(self, attrs):
self.collect(attrs)
self.add_missing_implementations()
self.fill_attrs(attrs) | Perform all actions on a given attribute dict. |
6,639 | def diff_commonPrefix(self, text1, text2):
if not text1 or not text2 or text1[0] != text2[0]:
return 0
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid | Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string. |
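For example, the binary search above gives:
diff_commonPrefix('abcdef', 'abcxyz')  # -> 3 ('abc')
diff_commonPrefix('xyz', 'abc')        # -> 0 (quick check: first characters differ)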
6,640 | def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
args = parser.parse_args()
if args.order < 0 or args.order > 5:
parser.error('The order has to be between 0 and 5.')
return args | Provides additional validation of the arguments collected by argparse. |
6,641 | def alignment_changed(self, settings, key, user_data):
RectCalculator.set_final_window_rect(self.settings, self.guake.window)
self.guake.set_tab_position()
self.guake.force_move_if_shown() | If the gconf var window_halignment is changed, this method will
be called and will call the move function in guake. |
6,642 | def makeAudibleSong(self):
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav") | Use mass to render wav soundtrack. |
6,643 | def _AddClearFieldMethod(message_descriptor, cls):
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
try:
field = message_descriptor.oneofs_by_name[field_name]
if field in self._oneofs:
field = self._oneofs[field]
else:
return
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name))
if field in self._fields:
if hasattr(self._fields[field], 'InvalidateIterators'):
self._fields[field].InvalidateIterators()
del self._fields[field]
if self._oneofs.get(field.containing_oneof, None) is field:
del self._oneofs[field.containing_oneof]
self._Modified()
cls.ClearField = ClearField | Helper for _AddMessageMethods(). |
6,644 | def login_to_portal(username, password, client, retries=2, delay=0):
if not client.session_id:
client.request_session()
concierge_request_header = client.construct_concierge_header(
url=("http://membersuite.com/contracts/IConciergeAPIService/"
"LoginToPortal"))
attempts = 0
while attempts < retries:
if attempts:
time.sleep(delay)
result = client.client.service.LoginToPortal(
_soapheaders=[concierge_request_header],
portalUserName=username,
portalPassword=password)
login_to_portal_result = result["body"]["LoginToPortalResult"]
if login_to_portal_result["Success"]:
portal_user = login_to_portal_result["ResultValue"]["PortalUser"]
session_id = get_session_id(result=result)
return PortalUser(membersuite_object_data=portal_user,
session_id=session_id)
else:
attempts += 1
try:
error_code = login_to_portal_result[
"Errors"]["ConciergeError"][0]["Code"]
except IndexError:
continue
else:
if attempts < retries and error_code == "GeneralException":
continue
raise LoginToPortalError(result=result) | Log `username` into the MemberSuite Portal.
Returns a PortalUser object if successful, raises
LoginToPortalError if not.
Will retry logging in if a GeneralException occurs, up to `retries`.
Will pause `delay` seconds between retries. |
6,645 | def ws_disconnect(message):
language = message.channel_session['language']
gr = Group('chat-{0}'.format(language))  # group-name prefix is an assumption
gr.discard(message.reply_channel) | Channels connection close.
Deregister the client |
6,646 | def wheel(self, package, options=None):
if self.readonly:
raise VirtualenvReadonlyException()
if options is None:
options = []
if isinstance(package, tuple):
package = '=='.join(package)
if not self.is_installed('wheel'):
raise PackageWheelException((0, "Wheel package must be installed in the virtual environment", package))
if not isinstance(options, list):
raise ValueError("Options must be a list of strings.")
try:
self._execute_pip(['wheel', package] + options)
except subprocess.CalledProcessError as e:
raise PackageWheelException((e.returncode, e.output, package)) | Creates a wheel of the given package from this virtual environment,
as specified in pip's package syntax or a tuple of ('name', 'ver'),
only if it is not already installed. Some valid examples:
'Django'
'Django==1.5'
('Django', '1.5')
The `options` is a list of strings that can be used to pass to
pip. |
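Usage sketch for the wheel() API above (package names illustrative):
env.wheel(('Django', '1.5'))                  # equivalent to: pip wheel Django==1.5
env.wheel('requests', options=['--no-deps'])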
6,647 | def get_help(self, is_category, item):
data = {"cmd": "help"}
if is_category:
data["category"] = item
else:
data["command"] = item
self._send_packet(data) | Sends documentation on <item> to <callback>.
This can be used for programmatically accessing documentation.
Keyword arguments:
is_category -- <bool>; Set this to <True> if <item> is for
getting documentation on a permission level and
<False> if <item> is for getting documentation on
a command.
item -- <str>; If <is_category> is <True>, this should be one of
<"core">, <"mod"> or <"admin"> to get
documentation on the commands specific to that
permission level. If <is_category> is <False>,
this should be the name of the command to get
documentation on. |
6,648 | def ang2pix(nside, theta, phi, nest=False, lonlat=False):
lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
return lonlat_to_healpix(lon, lat, nside, order='nested' if nest else 'ring') | Drop-in replacement for healpy `~healpy.pixelfunc.ang2pix`. |
6,649 | def registerItem(self, regItem):
super(RtiRegistry, self).registerItem(regItem)
for ext in regItem.extensions:
self._registerExtension(ext, regItem) | Adds a ClassRegItem object to the registry. |
6,650 | def add_media_description(self, media_description):
if self.get_media_descriptions_metadata().is_read_only():
raise NoAccess()
self.add_or_replace_value('media_description', media_description) | Adds a media_description.
arg: media_description (displayText): the new media_description
raise: InvalidArgument - ``media_description`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``media_description`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
6,651 | def Main():
args_parser = argparse.ArgumentParser(
description='Validates an artifact definitions file.')
args_parser.add_argument(
'filename',
nargs='?',
action='store',
metavar='ARTIFACT_DEFINITIONS',
default=None,
help=(
'path of the file that contains the artifact definitions.'))
options = args_parser.parse_args()
if not options.filename:
print('Artifact definitions file is missing.')
print('')
args_parser.print_help()
print('')
return False
if not os.path.isfile(options.filename):
print('No such file: {0:s}'.format(options.filename))
print('')
return False
print('Validating: {0:s}'.format(options.filename))
validator = ArtifactDefinitionsValidator()
if not validator.CheckFile(options.filename):
print('FAILURE')
return False
print('SUCCESS')
return True | The main program function.
Returns:
bool: True if successful or False if not. |
6,652 | def _FormatDescription(self, event):
date_time_string = timelib.Timestamp.CopyToIsoFormat(
event.timestamp, timezone=self._output_mediator.timezone)
timestamp_description = event.timestamp_desc or 'UNKNOWN'
message, _ = self._output_mediator.GetFormattedMessages(event)
if message is None:
data_type = getattr(event, 'data_type', 'UNKNOWN')
raise errors.NoFormatterFound(
'Unable to find event formatter for: {0:s}.'.format(data_type))
description = '{0:s}; {1:s}; {2:s}'.format(
date_time_string, timestamp_description,
message.replace(self._DESCRIPTION_FIELD_DELIMITER, ' '))
return self._SanitizeField(description) | Formats the description.
Args:
event (EventObject): event.
Returns:
str: formatted description field. |
6,653 | def maximum_cut(G, sampler=None, **sampler_args):
h = {v: 0. for v in G}
J = {(u, v): 1 for u, v in G.edges}
response = sampler.sample_ising(h, J, **sampler_args)
sample = next(iter(response))
return set(v for v in G if sample[v] >= 0) | Returns an approximate maximum cut.
Defines an Ising problem with ground states corresponding to
a maximum cut and uses the sampler to sample from it.
A maximum cut is a subset S of the vertices of G such that
the number of edges between S and the complementary subset
is as large as possible.
Parameters
----------
G : NetworkX graph
The graph on which to find a maximum cut.
sampler
A binary quadratic model sampler. A sampler is a process that
samples from low energy states in models defined by an Ising
equation or a Quadratic Unconstrained Binary Optimization
Problem (QUBO). A sampler is expected to have a 'sample_qubo'
and 'sample_ising' method. A sampler is expected to return an
iterable of samples, in order of increasing energy. If no
sampler is provided, one must be provided using the
`set_default_sampler` function.
sampler_args
Additional keyword parameters are passed to the sampler.
Returns
-------
S : set
A maximum cut of G.
Example
-------
This example uses a sampler from
`dimod <https://github.com/dwavesystems/dimod>`_ to find a maximum cut
for a graph of a Chimera unit cell created using the `chimera_graph()`
function.
>>> import dimod
>>> import dwave_networkx as dnx
>>> samplerSA = dimod.SimulatedAnnealingSampler()
>>> G = dnx.chimera_graph(1, 1, 4)
>>> cut = dnx.maximum_cut(G, samplerSA)
Notes
-----
Samplers by their nature may not return the optimal solution. This
function does not attempt to confirm the quality of the returned
sample. |
6,654 | def get_comments_data(self, slug):
all_the_data = []
for item in self.chan.findall("item"):
# NOTE: the element and key literals below were lost in extraction; the
# names used ('{wp}post_name', 'slug', 'title') are assumptions.
if not item.find('{wp}post_name').text == slug:
continue
item_dict = self.item_dict(item)
if not item_dict or not item_dict.get('slug'):
continue
slug = item_dict.get('slug') or re.sub(r'[^\w]+', '-', item_dict['title'])
for comment in item.findall("{wp}comment"):
comment = self.translate_wp_comment(comment)
comment['slug'] = slug
all_the_data.append(comment)
return all_the_data | Returns a flat list of all comments in XML dump. Formatted as the JSON
output from Wordpress API.
Keys:
('content', 'slug', 'date', 'status', 'author', 'ID', 'parent')
date format: '%Y-%m-%dT%H:%M:%S'
author: {'username': 'Name', 'URL': ''} |
6,655 | def get_provider(self, name):
if name not in self.providers:
cls = self.provider_classes[name]
self.providers[name] = cls(self)
return self.providers[name] | Allows for lazy instantiation of providers (Jinja2 templating is heavy, so only instantiate it if
necessary). |
6,656 | def mdwarf_subtype_from_sdsscolor(ri_color, iz_color):
# NOTE: the body of this function was garbled in extraction. The sketch
# below follows the docstring: project the (r-i, i-z) colours onto the
# M-dwarf colour locus, then map the locus position to a subtype M0-M9.
# The locus coefficients and boundaries here are assumptions.
obj_sti = 0.875274 * ri_color + 0.483628 * (iz_color + 0.00438)
obj_sts = -0.483628 * ri_color + 0.875274 * (iz_color + 0.00438)
if 0.666 < obj_sti < 3.4559:
# divide the locus range evenly over the subtypes M0-M9
subtypes = ['M0', 'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9']
m_class = subtypes[int((obj_sti - 0.666) / (3.4559 - 0.666) * 10)]
else:
m_class = None
return m_class, obj_sti, obj_sts | This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.
Parameters
----------
ri_color : float
The SDSS `r-i` color of the object.
iz_color : float
The SDSS `i-z` color of the object.
Returns
-------
(subtype, index1, index2) : tuple
`subtype`: if the star appears to be an M dwarf, will return an int
between 0 and 9 indicating its subtype, e.g. will return 4 for an M4
dwarf. If the object isn't an M dwarf, will return None
`index1`, `index2`: the M-dwarf color locus value and spread of this
object calculated from the `r-i` and `i-z` colors. |
6,657 | def _has_branch(branch):
ret = temple.utils.shell('git rev-parse --verify {}'.format(branch),
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
check=False)
return ret.returncode == 0 | Return True if the target branch exists. |
6,658 | def start_log_child(self):
self.stop_log_child()
gconfig = yakonfig.get_global_config()
read_end, write_end = os.pipe()
pid = os.fork()
if pid == 0:
self.clear_signal_handlers()
os.close(write_end)
yakonfig.clear_global_config()
self.log_spewer(gconfig, read_end)
sys.exit(0)
else:
self.debug('start_log_child', 'started log child pid={0}'.format(pid))  # argument literals are assumptions
self.log_child = pid
os.close(read_end)
self.log_fd = write_end | Start the logging child process. |
6,659 | def get(self):
# NOTE: the start of this handler (loading and sorting the checkplot list)
# was lost in extraction; the render call and the template name below are
# reconstructed assumptions.
self.render('cpindex.html',
project_checkplots=project_checkplots,
project_cpsortorder=project_cpsortorder,
project_cpsortkey=project_cpsortkey,
project_cpfilterstatements=project_cpfilterstatements,
project_checkplotbasenames=project_checkplotbasenames,
project_checkplotindices=project_checkplotindices,
project_checkplotfile=self.cplistfile,
readonly=self.readonly,
baseurl=self.baseurl) | This handles GET requests to the index page.
TODO: provide the correct baseurl from the checkplotserver options dict,
so the frontend JS can just read that off immediately. |
6,660 | def _parse_spectral_data(
self,
content,
TNSId):
self.log.info('starting the ``_parse_spectral_data`` method')
specData = []
relatedFilesTable = []
# NOTE: the regex pattern literals in this method were lost in extraction;
# empty placeholder patterns are used below.
classBlock = re.search(
r'',
content,
flags=re.S
)
if classBlock:
classBlock = classBlock.group()
reports = re.finditer(
r'',
classBlock,
flags=re.S
)
relatedFiles = self._parse_related_files(classBlock)
filesAppended = False
for r in reports:
header = re.search(
r'',
r.group(),
flags=re.S
)
if not header:
continue
header = header.groupdict()
header["TNSId"] = TNSId
del header["reporters"]
del header["surveyGroup"]
del header["survey"]
if not self.comments:
del header['sourceComment']
else:
theseComments = header[
"sourceComment"].split("\n")
header["sourceComment"] = ""
for c in theseComments:
header["sourceComment"] += " " + c.strip()
header["sourceComment"] = header[
"sourceComment"].strip().replace('"', "'")[0:750]
# NOTE: the block that parsed each spectrum row into the `s` dict was lost
# in extraction; `s` below comes from that missing block.
s.update(header)
if s["relatedFiles"] and filesAppended == False:
filesAppended = True
for f in relatedFiles:
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = f[
"filepath"].split("/")[-1]
thisFile["url"] = f["filepath"]
if self.comments:
thisFile["comment"] = f[
"fileComment"].replace("\n", " ").strip()
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
for ffile in [s["filepath"], s["fitsFilepath"]]:
if ffile:
thisFile = collections.OrderedDict()
thisFile["TNSId"] = TNSId
thisFile["filename"] = ffile.split(
"/")[-1]
thisFile["url"] = ffile
if self.comments:
thisFile["comment"] = ""
thisFile["dateObs"] = s["obsdate"]
thisFile["spec1phot2"] = 1
relatedFilesTable.append(thisFile)
del s["filepath"]
del s["fitsFilepath"]
del s["relatedFiles"]
orow = collections.OrderedDict()
keyOrder = ["TNSId", "survey", "obsdate", "specType", "transRedshift",
"telescope", "exptime", "reportAddedDate", "TNSuser"]
for k, v in s.iteritems():
if k not in keyOrder:
keyOrder.append(k)
for k in keyOrder:
try:
orow[k] = s[k]
except:
self.log.info(
"`%(k)s` not found in the source data for %(TNSId)s" % locals())
pass
specData.append(orow)
self.log.info('completed the ``_parse_spectral_data`` method')
return specData, relatedFilesTable | *parse spectra data from a row in the tns results content*
**Key Arguments:**
- ``content`` -- a table row from the TNS results page
- ``TNSId`` -- the tns id of the transient
**Return:**
- ``specData`` -- a list of dictionaries of the spectral data
- ``relatedFilesTable`` -- a list of dictionaries of transient spectrum related files |
6,661 | def _replace(variables, match):
expression = match.group(1)
(prefix_char, separator_char, split_fn, escape_fn,
format_fn) = operator_map.get(expression[0], defaults)
replacements = []
for key, modify_fn, explode in split_fn(expression):
if key in variables:
variable = modify_fn(variables[key])
replacement = format_fn(
explode, separator_char, escape_fn, key, variable)
replacements.append(replacement)
if not replacements:
return ''
return prefix_char + separator_char.join(replacements) | Return the appropriate replacement for `match` using the passed variables |
6,662 | def download(path, source_url):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if not os.path.exists(path):
print('Downloading {} to {}'.format(source_url, path))
filename = source_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading {} {:.1%}'.format(
filename, float(count * block_size) / float(total_size)))
sys.stdout.flush()
try:
urlretrieve(source_url, path, reporthook=_progress)
except:
sys.stdout.write('\n')
if os.path.exists(path):
os.remove(path)
raise
sys.stdout.write('\n')
return path | Download a file to a given path from a given URL, if it does not exist.
Parameters
----------
path: str
The (destination) path of the file on the local filesystem
source_url: str
The URL from which to download the file
Returns
-------
str
The path of the file |
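Usage sketch (URL and destination are hypothetical):
download('/tmp/data/train-images.gz', 'https://example.com/train-images.gz')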
6,663 | def set_zone_order(self, zone_ids):
reordered_zones = []
# NOTE: the map key literals were lost in extraction; 'zones' and 'id' are
# assumptions.
current_zone_ids = [z['id'] for z in self.my_osid_object_form._my_map['zones']]
if set(zone_ids) != set(current_zone_ids):
raise IllegalState()
for zone_id in zone_ids:
for current_zone in self.my_osid_object_form._my_map['zones']:
if zone_id == current_zone['id']:
reordered_zones.append(current_zone)
break
self.my_osid_object_form._my_map['zones'] = reordered_zones | reorder zones per the passed in list
:param zone_ids:
:return: |
6,664 | def checkArgs(args):
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
try:
for i in xrange(2):
tmp = int(args.indep_pairwise[i])
tmp = float(args.indep_pairwise[2])
except ValueError:
msg = "indep-pairwise: need INT INT FLOAT"
raise ProgramError(msg)
tmpMAF = None
try:
tmpMAF = float(args.maf)
except ValueError:
msg = "maf: must be a float, not %s" % args.maf
raise ProgramError(msg)
if (tmpMAF > 0.5) or (tmpMAF < 0.0):
msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
raise ProgramError(msg)
if args.line_per_file_for_sge < 1:
msg = "line-per-file-for-sge: must be above 0, not " \
"%d" % args.line_per_file_for_sge
raise ProgramError(msg)
if args.min_nb_snp < 1:
msg = "min-nb-snp: must be above 1"
raise ProgramError(msg)
return True | Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1. |
6,665 | def _set_trunk_vlans(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..65535"]},
int_size=16,
),
restriction_dict={"range": ["1..4094"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.((409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])|\\*)"
},
),
RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(\\*|(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9]))\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])"
},
),
]
),
is_leaf=False,
yang_name="trunk-vlans",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/vlan",
defining_module="openconfig-vlan",
yang_type="union",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": ,
"defined-type": "openconfig-vlan:union",
"generated-type": ,
}
)
self.__trunk_vlans = t
if hasattr(self, "_set"):
self._set() | Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/aggregation/switched_vlan/state/trunk_vlans (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_vlans() directly.
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y. |
6,666 | def unset_env(self, key):
os.environ.pop(make_env_key(self.appname, key), None)
self._registered_env_keys.discard(key)
self._clear_memoization() | Removes an environment variable using the prepended app_name convention with `key`. |
6,667 | def backward(A, pobs, T=None, beta_out=None, dtype=np.float32):
if T is None:
T = pobs.shape[0]
elif T > pobs.shape[0]:
raise ValueError('T must not exceed the number of observations in pobs')
N = A.shape[0]
if beta_out is None:
beta_out = np.zeros((T, N), dtype=dtype)
elif T > beta_out.shape[0]:
raise ValueError('beta_out must have at least T rows')
beta_out[T-1, :] = 1.0
scale = np.sum(beta_out[T-1, :])
beta_out[T-1, :] /= scale
for t in range(T-2, -1, -1):
np.dot(A, beta_out[t+1, :] * pobs[t+1, :], out=beta_out[t, :])
scale = np.sum(beta_out[t, :])
beta_out[t, :] /= scale
return beta_out | Compute all backward coefficients. With scaling!
Parameters
----------
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
beta_out : ndarray((T,N), dtype = float), optional, default = None
containter for the beta result variables. If None, a new container will be created.
dtype : type, optional, default = np.float32
data type of the result.
Returns
-------
beta : ndarray((T,N), dtype = float), optional, default = None
beta[t,i] is the ith backward coefficient of time t. These can be
used in many different algorithms related to HMMs. |
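A minimal usage sketch with a toy two-state model (all numbers illustrative):
import numpy as np
A = np.array([[0.9, 0.1],
[0.2, 0.8]])
pobs = np.array([[0.5, 0.5],
[0.1, 0.9],
[0.7, 0.3]])
beta = backward(A, pobs)  # shape (3, 2); each row is rescaled to sum to 1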
6,668 | def finalize(self) -> None:
self.wrap_script()
if not self.statements:
# NOTE: the string literals in this method were lost in extraction; the
# directive names and error messages below are reconstructed from the SoS
# parser conventions and are partly assumptions.
self.task = ''
return
input_directive = [
idx for idx, statement in enumerate(self.statements)
if statement[0] == ':' and statement[1] == 'input'
]
task_directive = [
idx for idx, statement in enumerate(self.statements)
if statement[0] == ':' and statement[1] == 'task'
]
if len(task_directive) > 1:
raise ValueError('Only one task statement is allowed in a step')
for idx, statement in enumerate(self.statements):
if statement[0] == ':' and statement[1] == 'parameter':
if task_directive and task_directive[0] < idx:
raise ValueError(
'Parameter statements are not allowed in tasks')
if '=' not in statement[2]:
if ':' in statement[2]:
if not is_type_hint(statement[2]):
raise ValueError(
f'Invalid type hint in parameter specification {statement[2]}'
)
name, value = statement[2].split(':')
else:
name = statement[2]
value = 'str'
else:
name, value = statement[2].split('=', 1)
name = name.split(':')[0]
name = name.strip()
if name.startswith('_'):
raise ValueError(
f'Invalid parameter name {name}: names with a leading underscore are not allowed'
)
if not value.strip():
raise ValueError(
f'Invalid parameter definition: {statement[2]}'
)
self.statements[idx] = [
'!',
f'{name} = {value}\n',
statement[2].strip()
]
self.parameters[name] = (value, statement[3])
if input_directive and input_directive[0] < idx:
self.substep_parameters.add(name)
if not task_directive:
self.task = ''
else:
start_task = task_directive[0] + 1
self.task = ''
for statement in self.statements[start_task:]:
if statement[0] == ':':
if statement[1] in ('input', 'output', 'depends'):
raise ValueError(
f'Step task should be defined as the last item in a SoS step'
)
elif statement[1] == 'task':
raise ValueError(
f'Only one task is allowed in a step'
)
elif statement[1] == 'parameter':
raise ValueError(
f'Parameters should be defined before the step task'
)
self.task += '\n'
else:
self.task += statement[1]
self.task_params = self.statements[task_directive[0]][2]
self.statements = self.statements[:task_directive[0]]
if len(self.statements) > 1 and self.statements[-1][0] == '!':
starting = len(self.statements) - 1
for idx in range(starting - 1, -1, -1):
if self.statements[idx][0] == '!':
starting = idx
else:
break
for idx in range(starting + 1, len(self.statements)):
self.statements[starting][1] += self.statements[idx][1]
self.statements = self.statements[:starting + 1]
if not any(opt in self.options for opt in ('provides', 'shared')) and \
len([x for x in self.statements if x[0] == ':' and x[1] == 'output']) == 1:
output_stmt = [
x for x in self.statements if x[0] == ':' and x[1] == 'output'
][0][2]
output_names = get_names_of_kwargs(output_stmt)
self.options['provides'] = repr(output_names) | split statement and task by last directive |
6,669 | def _parseStylesheet(self, src):
if type(src) == six.binary_type:
src = six.text_type(src)
src = self.re_comment.sub('', src)
src = self._parseAtCharset(src)
src = self._parseSCDOCDC(src)
src, stylesheetImports = self._parseAtImports(src)
src = self._parseAtNamespace(src)
stylesheetElements = []
while src:
if src.startswith('@'):
src, atResults = self._parseAtKeyword(src)
if atResults is not None and atResults != NotImplemented:
stylesheetElements.extend(atResults)
else:
src, ruleset = self._parseRuleset(src)
stylesheetElements.append(ruleset)
src = self._parseSCDOCDC(src)
stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports)
return src, stylesheet | stylesheet
: [ CHARSET_SYM S* STRING S* ';' ]?
[S|CDO|CDC]* [ import [S|CDO|CDC]* ]*
[ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]*
; |
6,670 | def scheduled_times(self, earliest_time='now', latest_time='+1h'):
response = self.get("scheduled_times",
earliest_time=earliest_time,
latest_time=latest_time)
data = self._load_atom_entry(response)
rec = _parse_atom_entry(data)
times = [datetime.fromtimestamp(int(t))
for t in rec.content.scheduled_times]
return times | Returns the times when this search is scheduled to run.
By default this method returns the times in the next hour. For different
time ranges, set *earliest_time* and *latest_time*. For example,
for all times in the last day use "earliest_time=-1d" and
"latest_time=now".
:param earliest_time: The earliest time.
:type earliest_time: ``string``
:param latest_time: The latest time.
:type latest_time: ``string``
:return: The list of search times. |
6,671 | def isbinary(*args):
return all(map(lambda c: isnumber(c) or isbool(c), args)) | Checks if value can be part of binary/bitwise operations. |
6,672 | def from_legacy_urlsafe(cls, urlsafe):
urlsafe = _to_bytes(urlsafe, encoding="ascii")
padding = b"=" * (-len(urlsafe) % 4)
urlsafe += padding
raw_bytes = base64.urlsafe_b64decode(urlsafe)
reference = _app_engine_key_pb2.Reference()
reference.ParseFromString(raw_bytes)
project = _clean_app(reference.app)
namespace = _get_empty(reference.name_space, u"")
_check_database_id(reference.database_id)
flat_path = _get_flat_path(reference.path)
return cls(*flat_path, project=project, namespace=namespace) | Convert urlsafe string to :class:`~google.cloud.datastore.key.Key`.
This is intended to work with the "legacy" representation of a
datastore "Key" used within Google App Engine (a so-called
"Reference"). This assumes that ``urlsafe`` was created within an App
Engine app via something like ``ndb.Key(...).urlsafe()``.
:type urlsafe: bytes or unicode
:param urlsafe: The base64 encoded (ASCII) string corresponding to a
datastore "Key" / "Reference".
:rtype: :class:`~google.cloud.datastore.key.Key`.
:returns: The key corresponding to ``urlsafe``. |
6,673 | def schedule(cron, name, params):
if name not in celery.tasks:
exit_with_error('Job %s not found' % name)
args = [p for p in params if '=' not in p]
kwargs = dict(p.split('=') for p in params if '=' in p)
label = 'Scheduled {0}'.format(job_label(name, args, kwargs))
try:
task = PeriodicTask.objects.get(task=name, args=args, kwargs=kwargs)
task.modify(crontab=PeriodicTask.Crontab.parse(cron))
except PeriodicTask.DoesNotExist:
task = PeriodicTask.objects.create(
task=name,
name=label,
description='Periodically run {0}'.format(name),
enabled=True,
args=args,
kwargs=kwargs,
crontab=PeriodicTask.Crontab.parse(cron),
)
msg = '{label} scheduled with the following crontab: {cron}'
log.info(msg.format(label=label, cron=task.schedule_display)) | Schedule the job <name> to run periodically given the <cron> expression.
Jobs args and kwargs are given as parameters without dashes.
Ex:
udata job schedule "* * 0 * *" my-job arg1 arg2 key1=value key2=value |
6,674 | def subdomain_check_pending(self, subrec, atlasdb_path, cur=None):
_, _, domain = is_address_subdomain(subrec.get_fqn())
sql = 'SELECT missing FROM {} WHERE domain = ? ORDER BY parent_zonefile_index DESC LIMIT 1;'.format(self.subdomain_table)  # query literal reconstructed; column names are assumptions
args = (domain,)
cursor = None
if cur is None:
cursor = self.conn.cursor()
else:
cursor= cur
rows = db_query_execute(cursor, sql, args)
missing_str = ""
try:
rowdata = rows.fetchone()
assert rowdata
missing_str = rowdata['missing']
except:
pass
known_missing = [int(i) for i in missing_str.split(',')] if missing_str is not None and len(missing_str) > 0 else []
num_missing = atlasdb_get_zonefiles_missing_count_by_name(domain, indexes_exclude=known_missing, path=atlasdb_path)
if num_missing > 0:
log.debug("Subdomain is missing {} zone files: {}".format(num_missing, subrec))
return num_missing > 0 | Determine whether or not a subdomain record's domain is missing zone files
(besides the ones we expect) that could invalidate its history. |
6,675 | def staticfy(html_file, args=argparse.ArgumentParser()):
static_endpoint = args.static_endpoint or 'static'
framework = args.framework or os.getenv('STATICFY_FRAMEWORK', 'flask')
add_tags = args.add_tags or {}
exc_tags = args.exc_tags or {}
namespace = args.namespace or {}
tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}
add_tags = {(tag, attr) for tag, attr in add_tags.items()}
tags.update(add_tags)
exc_tags = {(tag, attr) for tag, attr in exc_tags.items()}
tags = tags - exc_tags
matches = get_elements(html_file, tags)
transformed = transform(matches, framework, namespace, static_endpoint)
return replace_lines(html_file, transformed) | Staticfy method.
Loop through each line of the file and replaces the old links |
6,676 | def strel_disk(radius):
iradius = int(radius)
x,y = np.mgrid[-iradius:iradius+1,-iradius:iradius+1]
radius2 = radius * radius
strel = np.zeros(x.shape)
strel[x*x+y*y <= radius2] = 1
return strel | Create a disk structuring element for morphological operations
radius - radius of the disk |
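For example, strel_disk(1) yields the 3x3 cross-shaped element:
strel_disk(1)
# array([[0., 1., 0.],
#        [1., 1., 1.],
#        [0., 1., 0.]])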
6,677 | def normalize_name(self, header_name):
if self.__normalization_mode in ['0.9', '1.0', '1.1']:
return '-'.join([x.capitalize() for x in header_name.split('-')])
elif self.__normalization_mode == '2':
return header_name.lower()
raise RuntimeError('Unsupported HTTP protocol: %s' % self.__normalization_mode) | Return header name as it is recommended (required) by corresponding http protocol. For
protocol switching use :meth:`.WHTTPHeaders.switch_name_style` method.
All current available protocols (0.9-2) compare header names in a case-insensitive fashion. However,
previous protocol versions (0.9-1.1) recommends to use camel-case names like Foo or Foo-Bar. But
HTTP/2 (RFC 7540) strictly requires lowercase only header names.
:param header_name: name to convert
:return: str |
6,678 | def delete_user(self, username):
url = self._options['server'] + '/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False | Deletes a JIRA User.
:param username: Username to delete
:type username: str
:return: Success of user deletion
:rtype: bool |
6,679 | def sub_retab(match):
before = match.group(1)
tabs = len(match.group(2))
return before + (' ' * (TAB_SIZE * tabs - len(before) % TAB_SIZE)) | r"""Remove all tabs and convert them into spaces.
PARAMETERS:
match -- regex match; uses re_retab pattern: \1 is text before tab,
\2 is a consecutive string of tabs.
A simple substitution of 4 spaces would result in the following:
to\tlive # original
to live # simple substitution
Instead, we convert tabs like the following:
to\tlive # original
to live # the tab *looks* like two spaces, so we convert
# it to two spaces |
6,680 | def merge_up(self, target_branch=None, feature_branch=None, delete=True, create=True):
timer = Timer()
repository_was_created = self.create()
revision_to_merge = None
if not target_branch:
target_branch = self.current_branch
if not target_branch:
raise TypeError("You need to specify the target branch! (where merging starts)")
feature_branch = coerce_feature_branch(feature_branch) if feature_branch else None
self.ensure_clean()
# NOTE: parts of this method were garbled in extraction; the branch-existence
# check and the merge queue below are reconstructed and partly assumptions.
if target_branch in self.branches:
self.checkout(revision=target_branch)
revision_to_merge = self.find_revision_id(target_branch)
elif not create:
raise ValueError("The target branch %r doesn't exist!" % target_branch)
if feature_branch:
# Find the revision we're about to merge.
revision_to_merge = self.find_revision_id(feature_branch.revision)
self.merge(revision=feature_branch.revision)
self.commit(message="Merged %s" % feature_branch.expression)
# Merge the change up through the release branches that follow the target.
merge_queue = [target_branch] + [b for b in sorted(self.branches) if b > target_branch]  # queue construction is an assumption
while len(merge_queue) >= 2:
from_branch, to_branch = merge_queue[0], merge_queue[1]
self.checkout(revision=to_branch)
self.merge(revision=from_branch)
self.commit(message="Merged %s" % from_branch)
merge_queue.pop(0)
if delete and feature_branch and self.is_feature_branch(feature_branch.revision):
self.delete_branch(
branch_name=feature_branch.revision,
message="Closing feature branch %s" % feature_branch.revision,
)
self.checkout()
logger.info("Done! Finished merging up in %s.", timer)
return revision_to_merge | Merge a change into one or more release branches and the default branch.
:param target_branch: The name of the release branch where merging of
the feature branch starts (a string or
:data:`None`, defaults to
:attr:`current_branch`).
:param feature_branch: The feature branch to merge in (any value
accepted by :func:`coerce_feature_branch()`).
:param delete: :data:`True` (the default) to delete or close the
feature branch after it is merged, :data:`False`
otherwise.
:param create: :data:`True` to automatically create the target branch
when it doesn't exist yet, :data:`False` otherwise.
:returns: If `feature_branch` is given the global revision id of the
feature branch is returned, otherwise the global revision id
of the target branch (before any merges performed by
:func:`merge_up()`) is returned. If the target branch is
created by :func:`merge_up()` and `feature_branch` isn't
given then :data:`None` is returned.
:raises: The following exceptions can be raised:
- :exc:`~exceptions.TypeError` when `target_branch` and
:attr:`current_branch` are both :data:`None`.
- :exc:`~exceptions.ValueError` when the given target branch
doesn't exist (based on :attr:`branches`) and `create` is
:data:`False`.
- :exc:`~executor.ExternalCommandFailed` if a command fails. |
6,681 | def get_genres(self):
page = r.get(ITUNES_GENRES_URL)
tree = html.fromstring(page.content)
elements = tree.xpath("//a[@class=]")
return [e.attrib[] for e in elements] | Grab genre URLs from iTunes Podcast preview |
6,682 | def _filter_list_to_conjunction_expression(filter_list):
if not isinstance(filter_list, list):
raise AssertionError(u'Expected list, received: {}'.format(filter_list))
if any((not isinstance(filter_block, Filter) for filter_block in filter_list)):
raise AssertionError(u.format(filter_list))
expression_list = [filter_block.predicate for filter_block in filter_list]
return expression_list_to_conjunction(expression_list) | Convert a list of filters to an Expression that is the conjunction of all of them. |
6,683 | def title(self):
name = c.namemap_lookup(self.id)
if name is None:
name = self._title + " " + client.get_semester_title(self)
c.namemap_set(self.id, name)
return secure_filename(name) | The title of the course. If no entry in the namemap of the configuration is found a new entry is created with name=$STUD.IP_NAME + $SEMESTER_NAME |
6,684 | def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir=None,
injection_file=None, tags=None):
if tags is None:
tags = []
logging.info("Entering matched-filtering setup module.")
make_analysis_dir(output_dir)
cp = workflow.cp
mfltrMethod = cp.get_opt_tags("workflow-matchedfilter", "matchedfilter-method",
tags)
if mfltrMethod == "WORKFLOW_INDEPENDENT_IFOS":
logging.info("Adding matched-filter jobs to workflow.")
if cp.has_option_tags("workflow-matchedfilter",
"matchedfilter-link-to-tmpltbank", tags):
if not cp.has_option_tags("workflow-tmpltbank",
"tmpltbank-link-to-matchedfilter", tags):
errMsg = "If using matchedfilter-link-to-tmpltbank, you should "
errMsg += "also use tmpltbank-link-to-matchedfilter."
logging.warn(errMsg)
linkToTmpltbank = True
else:
linkToTmpltbank = False
if cp.has_option_tags("workflow-matchedfilter",
"matchedfilter-compatibility-mode", tags):
if not linkToTmpltbank:
errMsg = "Compatibility mode requires that the "
errMsg += "matchedfilter-link-to-tmpltbank option is also set."
raise ValueError(errMsg)
if not cp.has_option_tags("workflow-tmpltbank",
"tmpltbank-compatibility-mode", tags):
errMsg = "If using compatibility mode it must be set both in "
errMsg += "the template bank and matched-filtering stages."
raise ValueError(errMsg)
compatibility_mode = True
else:
compatibility_mode = False
inspiral_outs = setup_matchedfltr_dax_generated(workflow, science_segs,
datafind_outs, tmplt_banks, output_dir,
injection_file=injection_file,
tags=tags,
link_to_tmpltbank=linkToTmpltbank,
compatibility_mode=compatibility_mode)
elif mfltrMethod == "WORKFLOW_MULTIPLE_IFOS":
logging.info("Adding matched-filter jobs to workflow.")
inspiral_outs = setup_matchedfltr_dax_generated_multi(workflow,
science_segs, datafind_outs, tmplt_banks,
output_dir, injection_file=injection_file,
tags=tags)
else:
errMsg = "Matched filter method not recognized. Must be one of "
errMsg += "WORKFLOW_INDEPENDENT_IFOS (currently only one option)."
raise ValueError(errMsg)
logging.info("Leaving matched-filtering setup module.")
return inspiral_outs | This function aims to be the gateway for setting up a set of matched-filter
jobs in a workflow. This function is intended to support multiple
different ways/codes that could be used for doing this. For now the only
supported sub-module is one that runs the matched-filtering by setting up
a series of matched-filtering jobs, from one executable, to create
matched-filter triggers covering the full range of science times for which
there is data and a template bank file.
Parameters
-----------
Workflow : pycbc.workflow.core.Workflow
The workflow instance that the matched-filtering jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given the file containing the simulation file to be sent to these
jobs on the command line. If not given no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly. |
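A hedged call sketch; every input is assumed to come from the corresponding earlier pycbc.workflow setup stage, and the tag value is illustrative:

    inspiral_outs = setup_matchedfltr_workflow(workflow, science_segs,
                                               datafind_outs, tmplt_banks,
                                               output_dir='matchedfltr',
                                               tags=['BNSINJECTIONS'])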
6,685 | def runSearchReferenceSets(self, request):
return self.runSearchRequest(
request, protocol.SearchReferenceSetsRequest,
protocol.SearchReferenceSetsResponse,
self.referenceSetsGenerator) | Runs the specified SearchReferenceSetsRequest. |
6,686 | def start_time(self):
dt = self.nc['...'].dt  # variable/key name lost in extraction
return datetime(year=dt.year, month=dt.month, day=dt.day,
hour=dt.hour, minute=dt.minute,
second=dt.second, microsecond=dt.microsecond) | Start timestamp of the dataset |
6,687 | def get_lang_dict(self):
r = self.yandex_translate_request("getLangs")
self.handle_errors(r)
return r.json()["langs"] | gets supported langs as an dictionary |
6,688 | def add_or_update(self, app_id):
logger.info('uid: {0}, app_id: {1}'.format(self.userinfo.uid, app_id))  # log text assumed
MCollect.add_or_update(self.userinfo.uid, app_id)
out_dic = {'success': True}  # key name assumed; literal lost in extraction
return json.dump(out_dic, self) | Add or update the category. |
6,689 | def wait_until_not_moving(self, timeout=None):
return self.wait(lambda state: self.STATE_RUNNING not in state or self.STATE_STALLED in state, timeout) | Blocks until ``running`` is not in ``self.state`` or ``stalled`` is in
``self.state``. The condition is checked when there is an I/O event
related to the ``state`` attribute. Exits early when ``timeout``
(in milliseconds) is reached.
Returns ``True`` if the condition is met, and ``False`` if the timeout
is reached.
Example::
m.wait_until_not_moving() |
6,690 | def execute_cmdline_scenarios(scenario_name, args, command_args):
scenarios = molecule.scenarios.Scenarios(
get_configs(args, command_args), scenario_name)
scenarios.print_matrix()
for scenario in scenarios:
try:
execute_scenario(scenario)
except SystemExit:
if command_args.get('destroy') == 'always':
    msg = ('An error occurred during the {} sequence action: '
           "'{}'. Cleaning up.").format(scenario.config.subcommand,
                                        scenario.config.action)
    LOG.warn(msg)
    execute_subcommand(scenario.config, 'cleanup')
    execute_subcommand(scenario.config, 'destroy')
scenario.prune()
util.sysexit()
else:
raise | Execute scenario sequences based on parsed command-line arguments.
This is useful for subcommands that run scenario sequences, which
excludes subcommands such as ``list``, ``login``, and ``matrix``.
``args`` and ``command_args`` are combined using :func:`get_configs`
to generate the scenario(s) configuration.
:param scenario_name: Name of scenario to run, or ``None`` to run all.
:param args: ``args`` dict from ``click`` command context
:param command_args: dict of command arguments, including the target
subcommand to execute
:returns: None |
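A hedged usage sketch mirroring what a click subcommand might pass in (all argument values are illustrative):

    args = {'debug': False, 'base_config': None, 'env_file': None}
    command_args = {'subcommand': 'test', 'destroy': 'always'}
    execute_cmdline_scenarios('default', args, command_args)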
6,691 | def setup(self, phase=None, quantity='', conductance='', r_tolerance=None,
          max_iter=None, relaxation_source=None,
          relaxation_quantity=None, **kwargs):
    if phase:
        self.settings['phase'] = phase.name
    if quantity:
        self.settings['quantity'] = quantity
    if conductance:
        self.settings['conductance'] = conductance
    if r_tolerance:
        self.settings['r_tolerance'] = r_tolerance
    if max_iter:
        self.settings['max_iter'] = max_iter
    if relaxation_source:
        self.settings['relaxation_source'] = relaxation_source
    if relaxation_quantity:
        self.settings['relaxation_quantity'] = relaxation_quantity
    super().setup(**kwargs) | r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run. If no value is
given, the existing value is kept.
quantity : string
The name of the physical quantity to be calcualted such as
``'pore.xxx'``.
conductance : string
The name of the pore-scale transport conductance values. These
are typically calculated by a model attached to a *Physics* object
associated with the given *Phase*. Example; ``'throat.yyy'``.
r_tolerance : scalar
Tolerance to achieve. The solver returns a solution when 'residual'
falls below 'r_tolerance'. The default value is 0.001.
max_iter : scalar
The maximum number of iterations the solver can perform to find
a solution. The default value is 5000.
relaxation_source : scalar, between 0 and 1
A relaxation factor to control under-relaxation of the source term.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
relaxation_quantity : scalar, between 0 and 1
A relaxation factor to control under-relaxation for the quantity
solving for.
Factor approaching 0 : improved stability but slow simulation.
Factor approaching 1 : fast simulation but may be unstable.
Default value is 1 (no under-relaxation).
Notes
-----
Under-relaxation is a technique used for improving stability of a
computation, particularly in the presence of highly non-linear terms.
Under-relaxation used here limits the change in a variable from one
iteration to the next. An optimum choice of the relaxation factor is
one that is small enough to ensure stable simulation and large enough
to speed up the computation. |
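A hedged OpenPNM-style usage sketch (the algorithm instance, phase object, and data keys are illustrative):

    alg.setup(phase=water,
              quantity='pore.concentration',
              conductance='throat.diffusive_conductance',
              r_tolerance=1e-4, max_iter=5000,
              relaxation_source=0.9, relaxation_quantity=0.9)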
6,692 | def learn(self, state_arr, limit=1000):
while self.t <= limit:
next_action_arr = self.extract_possible_actions(state_arr)
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
self.learn_q(predicted_q_arr, real_q_arr)
state_arr = self.update_state(state_arr, action_arr)
self.t += 1
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break | Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. |
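The `update_q` step is not shown; it is assumed to follow the standard Q-learning temporal-difference rule. A minimal sketch with assumed hyperparameters:

    import numpy as np

    def update_q(predicted_q_arr, reward_value_arr, next_max_q_arr,
                 alpha=0.1, gamma=0.9):
        # TD target: r + gamma * max_a' Q(s', a'); alpha is the learning rate.
        target = reward_value_arr + gamma * next_max_q_arr
        return predicted_q_arr + alpha * (target - predicted_q_arr)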
6,693 | def jsonarrappend(self, name, path=Path.rootPath(), *args):
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces) | Appends the objects ``args`` to the array under the ``path`` in key
``name`` |
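A hedged usage sketch with the rejson-py client (the jsonset call and client class are assumed from that package):

    rj = Client(host='localhost', port=6379)
    rj.jsonset('mylist', Path.rootPath(), [1, 2])
    rj.jsonarrappend('mylist', Path.rootPath(), 3, 4)  # value is now [1, 2, 3, 4]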
6,694 | def _LeaseMessageHandlerRequests(self, lease_time, limit, cursor=None):
now = rdfvalue.RDFDatetime.Now()
now_str = mysql_utils.RDFDatetimeToTimestamp(now)
expiry = now + lease_time
expiry_str = mysql_utils.RDFDatetimeToTimestamp(expiry)
query = ("UPDATE message_handler_requests "
"SET leased_until=FROM_UNIXTIME(%s), leased_by=%s "
"WHERE leased_until IS NULL OR leased_until < FROM_UNIXTIME(%s) "
"LIMIT %s")
id_str = utils.ProcessIdString()
args = (expiry_str, id_str, now_str, limit)
updated = cursor.execute(query, args)
if updated == 0:
return []
cursor.execute(
"SELECT UNIX_TIMESTAMP(timestamp), request "
"FROM message_handler_requests "
"WHERE leased_by=%s AND leased_until=FROM_UNIXTIME(%s) LIMIT %s",
(id_str, expiry_str, updated))
res = []
for timestamp, request in cursor.fetchall():
req = rdf_objects.MessageHandlerRequest.FromSerializedString(request)
req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
req.leased_until = expiry
req.leased_by = id_str
res.append(req)
return res | Leases a number of message handler requests up to the indicated limit. |
6,695 | def setDetailedText( self, text ):
super(XMessageBox, self).setDetailedText(text)
if ( text ):
widgets = self.findChildren(QTextEdit)
widgets[0].setLineWrapMode(QTextEdit.NoWrap)
widgets[0].setHtml(text)
widgets[0].setMaximumHeight(1000)
widgets[0].setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
buttons = self.findChildren(QPushButton)
for button in buttons:
if ( button.text() == 'Show Details...' ):  # assumed Qt detail-button caption
button.clicked.connect( self.updateSizeMode )
break | Sets the details text for this message box to the given text. \
Overloading the default method to support HTML details.
:param text | <str> |
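A hedged usage sketch (the HTML payload is illustrative; the other calls are standard QMessageBox API):

    box = XMessageBox()
    box.setWindowTitle('Error')
    box.setText('Something went wrong.')
    box.setDetailedText('<b>Traceback</b><br/><pre>...</pre>')
    box.exec_()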
6,696 | def to_pretty_midi(self, constant_tempo=None, constant_velocity=100):
self.check_validity()
pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0])
if constant_tempo is None:
constant_tempo = self.tempo[0]
time_step_size = 60. / constant_tempo / self.beat_resolution
for track in self.tracks:
instrument = pretty_midi.Instrument(
program=track.program, is_drum=track.is_drum, name=track.name)
copied = track.copy()
if copied.is_binarized():
copied.assign_constant(constant_velocity)
copied.clip()
clipped = copied.pianoroll.astype(np.uint8)
binarized = (clipped > 0)
padded = np.pad(binarized, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_on_times = time_step_size * note_ons
note_offs = np.nonzero((diff < 0).T)[1]
note_off_times = time_step_size * note_offs
for idx, pitch in enumerate(pitches):
velocity = np.mean(clipped[note_ons[idx]:note_offs[idx], pitch])
note = pretty_midi.Note(
velocity=int(velocity), pitch=pitch,
start=note_on_times[idx], end=note_off_times[idx])
instrument.notes.append(note)
instrument.notes.sort(key=lambda x: x.start)
pm.instruments.append(instrument)
return pm | Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
- Only constant tempo is supported by now.
- The velocities of the converted pianorolls are clipped to [0, 127],
  i.e. values below 0 and values beyond 127 are replaced by 0 and 127,
  respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
The constant tempo value of the output object. Defaults to use the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance. |
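A hedged usage sketch; `multitrack` is assumed to be an instance of the pianoroll container this method belongs to:

    pm = multitrack.to_pretty_midi(constant_tempo=120, constant_velocity=100)
    pm.write('output.mid')  # pretty_midi.PrettyMIDI.write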
6,697 | def _processor(self):
self.store.cleanup(self._config.timeout)
self._load() | Application processor to setup session for every request |
6,698 | def handler(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'POST':
    return self.handle_POST(environ, start_response)
else:
    start_response("400 Bad request", [('Content-Type', 'text/plain')])  # header assumed
return [] | XMLRPC service for windmill browser core to communicate with |
6,699 | async def verify_worker_impls(chain):
valid_worker_impls = get_valid_worker_impls()
for obj in chain.get_all_links_in_chain():
worker_impl = obj.worker_impl
log.info("Verifying {} {} as a {} task...".format(obj.name, obj.task_id, worker_impl))
await valid_worker_impls[worker_impl](chain, obj) | Verify the task type (e.g. decision, build) of each link in the chain.
Args:
chain (ChainOfTrust): the chain we're operating on
Raises:
CoTError: on failure |