Unnamed: 0 (int64, 0–389k) | code (string, length 26–79.6k) | docstring (string, length 1–46.9k)
---|---|---|
26,900 |
def list_math_substraction_number(a, b):
return [a[i] - b for i in range(len(a))];
|
!
@brief Calculates subtraction between list and number.
@details Number 'b' is subtracted from each element of list 'a'.
@param[in] a (list): List of elements that supports mathematical subtraction.
@param[in] b (number): Value that supports mathematical subtraction.
@return (list) Results of subtraction between list and number.
|
26,901 |
def house_explosions():
chart = PieChart2D(int(settings.width * 1.7), settings.height)
chart.add_data([10, 10, 30, 200])
chart.set_pie_labels([
,
,
,
,
])
chart.download()
|
Data from http://indexed.blogspot.com/2007/12/meltdown-indeed.html
|
26,902 |
def load_yaml_file(yaml_file):
with io.open(yaml_file, 'r', encoding='utf-8') as stream:
yaml_content = yaml.load(stream)
_check_format(yaml_file, yaml_content)
return yaml_content
|
load yaml file and check file content format
|
26,903 |
def _get_table_rows(parent_table, table_name, row_name):
if parent_table is None:
return []
_table = parent_table.get(table_name)
_table_rows = []
if isinstance(_table, list):
_table_rows = [_table_row.get(row_name) for _table_row in _table]
elif isinstance(_table, dict):
_table_rows = _table.get(row_name)
if not isinstance(_table_rows, list):
_table_rows = [_table_rows]
return _table_rows
|
Inconsistent behavior:
{'TABLE_intf': [{'ROW_intf': {
vs
{'TABLE_mac_address': {'ROW_mac_address': [{
vs
{'TABLE_vrf': {'ROW_vrf': {'TABLE_adj': {'ROW_adj': {
|
26,904 |
def get_doc(self):
if not self.pyname:
return None
pyobject = self.pyname.get_object()
if not hasattr(pyobject, 'get_doc'):
return None
return self.pyname.get_object().get_doc()
|
Get the proposed object's docstring.
Returns None if it cannot be retrieved.
|
26,905 |
def plot_kmf(df,
condition_col,
censor_col,
survival_col,
strata_col=None,
threshold=None,
title=None,
xlabel=None,
ylabel=None,
ax=None,
with_condition_color="
no_condition_color="
with_condition_label=None,
no_condition_label=None,
color_map=None,
label_map=None,
color_palette="Set1",
ci_show=False,
print_as_title=False):
if threshold is None:
if df[condition_col].dtype != "bool" and \
np.issubdtype(df[condition_col].dtype, np.number):
threshold = "median"
elif isinstance(threshold, numbers.Number):
logger.debug("threshold value is numeric")
elif threshold not in ("median", "median-per-strata"):
raise ValueError("invalid input for threshold. Must be numeric, None, , or .")
elif threshold == "median-per-strata" and strata_col is None:
raise ValueError("threshold given was and yet `strata_col` was None. Did you mean ?")
arglist = dict(
condition_col=condition_col,
survival_col=survival_col,
censor_col=censor_col,
threshold=threshold,
with_condition_color=with_condition_color,
no_condition_color=no_condition_color,
with_condition_label=with_condition_label,
no_condition_label=no_condition_label,
color_map=color_map,
label_map=label_map,
xlabel=xlabel,
ylabel=ylabel,
ci_show=ci_show,
color_palette=color_palette,
print_as_title=print_as_title)
if strata_col is None:
arglist.update(dict(
df=df,
title=title,
ax=ax))
return _plot_kmf_single(**arglist)
else:
if threshold == "median":
arglist["threshold"] = df[condition_col].dropna().median()
elif threshold == "median-per-strata":
arglist["threshold"] = "median"
if ax is not None:
raise ValueError("ax not supported with stratified analysis.")
n_strata = len(df[strata_col].unique())
f, ax = plt.subplots(n_strata, sharex=True)
results = dict()
for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)):
if n_strata == 1:
arglist["ax"] = ax
else:
arglist["ax"] = ax[i]
subtitle = "{}: {}".format(strata_col, strat_name)
arglist["title"] = subtitle
arglist["df"] = strat_df
results[subtitle] = plot_kmf(**arglist)
[print(desc) for desc in results[subtitle].desc]
if title:
f.suptitle(title)
return results
|
Plot survival curves by splitting the dataset into two groups based on
condition_col. Report results for a log-rank test (if two groups are plotted)
or CoxPH survival analysis (if >2 groups) for association with survival.
Regarding definition of groups:
If condition_col is numeric, values are split into 2 groups.
- if threshold is defined, the groups are split on being above or below the threshold
- if threshold == 'median', the threshold is set to the median of condition_col
If condition_col is categorical or string, results are plotted for each unique value in the dataset.
If condition_col is None, results are plotted for all observations
Currently, if `strata_col` is given, the results are repeated among each stratum of the df.
A truly "stratified" analysis is not yet supported, but may be soon.
Parameters
----------
df: dataframe
condition_col: string, column which contains the condition to split on
survival_col: string, column which contains the survival time
censor_col: string,
strata_col: optional string, denoting column containing data to
stratify by (default: None)
threshold: int or string, if int, condition_col is thresholded at int,
if 'median', condition_col thresholded
at its median
if 'median-per-strata', & if stratified analysis
then condition_col thresholded by strata
title: Title for the plot, default None
ax: an existing matplotlib ax, optional, default None
note: not currently supported when `strata_col` is not None
with_condition_color: str, hex code color for the with-condition curve
no_condition_color: str, hex code color for the no-condition curve
with_condition_label: str, optional, label for True condition case
no_condition_label: str, optional, label for False condition case
color_map: dict, optional, mapping of hex-values to condition text
in the form of {value_name: color_hex_code}.
defaults to `sb.color_palette` using `default_color_palette` name,
or *_condition_color options in case of boolean operators.
label_map: dict, optional, mapping of labels to condition text.
defaults to "condition_name = condition_value", or *_condition_label
options in case of boolean operators.
color_palette: str, optional, name of sb.color_palette to use
if color_map not provided.
print_as_title: bool, optional, whether or not to print text
within the plot's title vs. stdout, default False
|
26,906 |
def OnExitSelectionMode(self, event):
self.grid.sel_mode_cursor = None
self.grid.EnableDragGridSize(True)
self.grid.EnableEditing(True)
|
Event handler for leaving selection mode, enables cell edits
|
26,907 |
def _convert_property_type(value):
if value in (, ):
return True
elif value in (, ):
return False
elif str(value).startswith() and str(value).endswith():
return ast.literal_eval(value)
else:
try:
return int(value)
except ValueError:
return value
|
Converts the string value into a boolean, integer or string
:param value: string value
:returns: boolean, integer or string value
|
26,908 |
def get_file_to_stream(
self, share_name, directory_name, file_name, stream,
start_range=None, end_range=None, validate_content=False,
progress_callback=None, max_connections=2, timeout=None):
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('stream', stream)
if max_connections == 1:
file = self._get_file(share_name,
directory_name,
file_name,
start_range=start_range,
end_range=end_range,
validate_content=validate_content,
timeout=timeout)
download_size = file.properties.content_length
else:
if sys.version_info >= (3,) and not stream.seekable():
raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE
initial_request_start = start_range if start_range else 0
if end_range and end_range - start_range < first_get_size:
initial_request_end = end_range
else:
initial_request_end = initial_request_start + first_get_size - 1
operation_context = _OperationContext(location_lock=True)
try:
file = self._get_file(share_name,
directory_name,
file_name,
start_range=initial_request_start,
end_range=initial_request_end,
validate_content=validate_content,
timeout=timeout,
_context=operation_context)
file_size = _parse_length_from_content_range(file.properties.content_range)
if end_range:
download_size = min(file_size, end_range - start_range + 1)
elif start_range:
download_size = file_size - start_range
else:
download_size = file_size
except AzureHttpError as ex:
if not start_range and ex.status_code == 416:
file = self._get_file(share_name,
directory_name,
file_name,
validate_content=validate_content,
timeout=timeout,
_context=operation_context)
download_size = 0
else:
raise ex
if progress_callback:
progress_callback(file.properties.content_length, download_size)
if file.content is not None:
stream.write(file.content)
file.content = None
if file.properties.content_length != download_size:
end_file = file_size
if end_range:
end_file = min(file_size, end_range + 1)
_download_file_chunks(
self,
share_name,
directory_name,
file_name,
download_size,
self.MAX_CHUNK_GET_SIZE,
first_get_size,
initial_request_end + 1,
end_file,
stream,
max_connections,
progress_callback,
validate_content,
timeout,
operation_context,
)
file.properties.content_length = download_size
file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size)
file.properties.content_md5 = None
return file
|
Downloads a file to a stream, with automatic chunking and progress
notifications. Returns an instance of :class:`File` with properties
and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param io.IOBase stream:
Opened file/stream to write to.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transferred so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data in parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
|
26,909 |
def InitPmf(self, values):
for value, prob in values.Items():
self.Set(value, prob)
|
Initializes with a Pmf.
values: Pmf object
|
26,910 |
def reparentNamespaces(self):
namespace_parts = []
namespace_ranks = []
for n in self.namespaces:
parts = n.name.split("::")
for p in parts:
if p not in namespace_parts:
namespace_parts.append(p)
namespace_ranks.append((len(parts), n))
traversal = sorted(namespace_ranks)
removals = []
for rank, namespace in reversed(traversal):
if rank < 2:
continue
for p_rank, p_namespace in reversed(traversal):
if p_rank == rank - 1:
if p_namespace.name == "::".join(namespace.name.split("::")[:-1]):
p_namespace.children.append(namespace)
namespace.parent = p_namespace
if namespace not in removals:
removals.append(namespace)
continue
removals = []
for nspace in self.namespaces:
if nspace.parent and nspace.parent.kind == "namespace" and nspace not in removals:
removals.append(nspace)
for rm in removals:
self.namespaces.remove(rm)
|
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Adds nested
namespaces as children to the relevant namespace ExhaleNode. If a node in
``self.namespaces`` is added as a child to a different namespace node, it is
removed from the ``self.namespaces`` list. Because these are removed from
``self.namespaces``, it is important that
:func:`~exhale.graph.ExhaleRoot.renameToNamespaceScopes` is called before this
method.
|
26,911 |
def visit_pass(self, node, parent):
return nodes.Pass(node.lineno, node.col_offset, parent)
|
visit a Pass node by returning a fresh instance of it
|
26,912 |
def _insert_plain_text(self, cursor, text):
cursor.beginEditBlock()
if self.ansi_codes:
for substring in self._ansi_processor.split_string(text):
for act in self._ansi_processor.actions:
cursor.movePosition(cursor.EndOfLine)
format = self._ansi_processor.get_format()
selection = cursor.selectedText()
if len(selection) == 0:
cursor.insertText(substring, format)
elif substring is not None:
if len(substring) >= len(selection):
cursor.insertText(substring, format)
else:
old_text = selection[len(substring):]
cursor.insertText(substring + old_text, format)
cursor.movePosition(cursor.PreviousCharacter,
cursor.KeepAnchor, len(old_text))
else:
cursor.insertText(text)
cursor.endEditBlock()
|
Inserts plain text using the specified cursor, processing ANSI codes
if enabled.
|
26,913 |
def fill_symbolic(self):
self.wYear = self.state.solver.BVS(, 16, key=(, , ))
self.wMonth = self.state.solver.BVS(, 16, key=(, , ))
self.wDayOfWeek = self.state.solver.BVS(, 16, key=(, , ))
self.wDay = self.state.solver.BVS(, 16, key=(, , ))
self.wHour = self.state.solver.BVS(, 16, key=(, , ))
self.wMinute = self.state.solver.BVS(, 16, key=(, , ))
self.wSecond = self.state.solver.BVS(, 16, key=(, , ))
self.wMilliseconds = self.state.solver.BVS(, 16, key=(, , ))
self.state.add_constraints(self.wYear >= 1601)
self.state.add_constraints(self.wYear <= 30827)
self.state.add_constraints(self.wMonth >= 1)
self.state.add_constraints(self.wMonth <= 12)
self.state.add_constraints(self.wDayOfWeek <= 6)
self.state.add_constraints(self.wDay >= 1)
self.state.add_constraints(self.wDay <= 31)
self.state.add_constraints(self.wHour <= 23)
self.state.add_constraints(self.wMinute <= 59)
self.state.add_constraints(self.wSecond <= 59)
self.state.add_constraints(self.wMilliseconds <= 999)
|
Fill the class with constrained symbolic values.
|
26,914 |
def patch_module_function(module, target, aspect, force_name=None, bag=BrokenBag, **options):
logdebug("patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **options=%s",
module, target, aspect, force_name, options)
name = force_name or target.__name__
return patch_module(module, name, _checked_apply(aspect, target, module=module), original=target, **options)
|
Low-level patcher for one function from a specified module.
.. warning:: You should not use this directly.
:returns: An :obj:`aspectlib.Rollback` object.
|
26,915 |
def read_var_uint64(self):
i = self._read_varint_helper()
if not 0 <= i <= wire_format.UINT64_MAX:
raise errors.DecodeError( % i)
return i
|
Reads a varint from the stream, interprets this varint
as an unsigned, 64-bit integer, and returns the integer.
|
26,916 |
def send_media_group(self, chat_id, media, disable_notification=None, reply_to_message_id=None):
result = apihelper.send_media_group(self.token, chat_id, media, disable_notification, reply_to_message_id)
ret = []
for msg in result:
ret.append(types.Message.de_json(msg))
return ret
|
send a group of photos or videos as an album. On success, an array of the sent Messages is returned.
:param chat_id:
:param media:
:param disable_notification:
:param reply_to_message_id:
:return:
|
26,917 |
def title_case(string):
if not string:
return string
string = string.replace(, ).replace(, )
parts = de_camel(string, , _lowercase=False).strip().split()
return .join([part if part.isupper() else part.title()
for part in parts])
|
Converts a string to title case. For example::
title_case('one_two_three') -> 'One Two Three'
|
26,918 |
def copyNodeList(self, node):
if node is None: node__o = None
else: node__o = node._o
ret = libxml2mod.xmlDocCopyNodeList(self._o, node__o)
if ret is None:raise treeError()
__tmp = xmlNode(_obj=ret)
return __tmp
|
Do a recursive copy of the node list.
|
26,919 |
def drop(self, labels, errors='raise'):
arr_dtype = 'object' if self.dtype == 'object' else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise KeyError(
'{} not found in axis'.format(labels[mask]))
indexer = indexer[~mask]
return self.delete(indexer)
|
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If not all of the labels are found in the selected axis
|
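A brief usage sketch (not part of the dataset row above, and assuming a recent pandas release) illustrating the drop behaviour the docstring describes:
import pandas as pd

idx = pd.Index(['a', 'b', 'c'])
# Existing labels are removed and a new Index is returned.
assert list(idx.drop(['b'])) == ['a', 'c']
# With errors='ignore', missing labels no longer raise a KeyError.
assert list(idx.drop(['b', 'z'], errors='ignore')) == ['a', 'c']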
26,920 |
def to_binary_string(obj, encoding=None):
if PY2:
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
return bytes(obj, 'utf-8' if encoding is None else encoding)
|
Convert `obj` to binary string (bytes in Python 3, str in Python 2)
|
26,921 |
def _parse_dtype(self, space):
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
return tf.float32
raise NotImplementedError()
|
Get a tensor dtype from a OpenAI Gym space.
Args:
space: Gym space.
Raises:
NotImplementedError: For spaces other than Box and Discrete.
Returns:
TensorFlow data type.
|
26,922 |
def save(self, annot=None, output_path=None):
if annot is not None:
self.fig.suptitle(annot, backgroundcolor=, color=)
if output_path is not None:
output_path = output_path.replace(' ', '_')
self.fig.savefig(output_path + , bbox_inches=, dpi=200,
bbox_extra_artists=self.flat_grid)
|
Saves the collage to disk as an image.
Parameters
-----------
annot : str
text to annotate the figure with a super title
output_path : str
path to save the figure to.
Note: any spaces in the filename will be replaced with ``_``
|
26,923 |
def register(self, name, content, description=None):
return self.__app.documents.register(name, content, self._plugin, description)
|
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
|
26,924 |
def asyncPipeRename(context=None, _INPUT=None, conf=None, **kwargs):
splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = yield maybeDeferred(parse_results, splits, **kwargs)
returnValue(_OUTPUT)
|
An operator that asynchronously renames or copies fields in the input
source. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
|
26,925 |
def easter(year):
y = year
g = y % 19
e = 0
c = y // 100
h = (c - c // 4 - (8 * c + 13) // 25 + 19 * g + 15) % 30
i = h - (h // 28) * (1 - (h // 28) * (29 // (h + 1)) * ((21 - g) // 11))
j = (y + y // 4 + i + 2 - c + c // 4) % 7
p = i - j + e
d = 1 + (p + 27 + (p + 6) // 40) % 31
m = 3 + (p + 26) // 30
return BusinessDate.from_ymd(int(y), int(m), int(d))
|
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
|
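The arithmetic above is easy to check by hand; below is a hypothetical standalone sketch of the same computation (returning a plain tuple instead of a BusinessDate), verified against Easter 2024 falling on 31 March:
def easter_ymd(year):
    # Same Gauss/Tondering-style arithmetic as above, without BusinessDate.
    g = year % 19
    c = year // 100
    h = (c - c // 4 - (8 * c + 13) // 25 + 19 * g + 15) % 30
    i = h - (h // 28) * (1 - (h // 28) * (29 // (h + 1)) * ((21 - g) // 11))
    j = (year + year // 4 + i + 2 - c + c // 4) % 7
    p = i - j
    d = 1 + (p + 27 + (p + 6) // 40) % 31
    m = 3 + (p + 26) // 30
    return year, m, d

assert easter_ymd(2024) == (2024, 3, 31)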
26,926 |
def _update(dashboard, profile):
payload = {
'dashboard': dashboard,
'overwrite': True
}
request_url = "{0}/api/dashboards/db".format(profile.get())
response = requests.post(
request_url,
headers={
"Authorization": "Bearer {0}".format(profile.get())
},
json=payload
)
return response.json()
|
Update a specific dashboard.
|
26,927 |
def command(self, payload):
data = self.message(MessageType.COMMAND, payload)
if data:
return json.loads(data, object_hook=CommandReply)
else:
return None
|
Send a command to i3. See the `list of commands
<http://i3wm.org/docs/userguide.html#_list_of_commands>`_ in the user
guide for available commands. Pass the text of the command to execute
as the first arguments. This is essentially the same as using
``i3-msg`` or an ``exec`` block in your i3 config to control the
window manager.
:rtype: List of :class:`CommandReply` or None if the command causes i3
to restart or exit and does not give a reply.
|
26,928 |
def compile_assets(self):
try:
curdir = os.path.abspath(os.curdir)
client_path = os.path.join(os.path.dirname(__file__), , )
os.chdir(client_path)
subprocess.check_call([, ])
subprocess.check_call([, , ])
os.chdir(curdir)
except (OSError, subprocess.CalledProcessError) as err:
print(.format(err))
raise SystemExit(1)
|
Compile the front end assets
|
26,929 |
def _await_flow(self, client, flow_id):
print(.format(flow_id))
while True:
try:
status = client.Flow(flow_id).Get().data
except grr_errors.UnknownError:
msg = .format(
flow_id, client.data.os_info.fqdn.lower())
self.state.add_error(msg)
raise DFTimewolfError(
.format(
flow_id, client.data.os_info.fqdn.lower()))
if status.state == flows_pb2.FlowContext.ERROR:
message = status.context.backtrace
if in status.context.backtrace:
message = status.context.backtrace.split()[-2]
raise DFTimewolfError(
.format(
flow_id, message))
if status.state == flows_pb2.FlowContext.TERMINATED:
print(.format(flow_id))
break
time.sleep(self._CHECK_FLOW_INTERVAL_SEC)
|
Awaits flow completion.
Args:
client: GRR Client object in which to await the flow.
flow_id: string containing ID of flow to await.
Raises:
DFTimewolfError: if flow error encountered.
|
26,930 |
def T1(word):
WORD = [i for i in re.split(r, word, flags=FLAGS) if i]
sub_rules = set()
count = 1
for i, v in enumerate(WORD):
if i == 0 and phon.is_consonant(v[0]):
sub_rules.add()
elif phon.is_consonant(v[0]):
count += 1
unstressed = count % 2 == 0
if i + 1 == len(WORD):
sub_rules.add()
elif phon.is_cluster(v):
sub_rules.add()
WORD[i] = v[0] + + v[1:] if unstressed else + v
elif phon.is_cluster(v[1:]):
return WORD, rules
|
Insert a syllable boundary in front of every CV sequence.
|
26,931 |
def _attributes(note, data):
note_attribute = EvernoteMgr.set_note_attribute(data)
if note_attribute:
note.attributes = note_attribute
return note
|
Set the attributes of the note
:param note: note object
:param data:
:return:
|
26,932 |
def postinit(self, exc=None, cause=None):
self.exc = exc
self.cause = cause
|
Do some setup after initialisation.
:param exc: What is being raised.
:type exc: NodeNG or None
:param cause: The exception being used to raise this one.
:type cause: NodeNG or None
|
26,933 |
def has_bad_headers(self):
headers = [self.sender, self.reply_to] + self.recipients
for header in headers:
if _has_newline(header):
return True
if self.subject:
if _has_newline(self.subject):
for linenum, line in enumerate(self.subject.split('\r\n')):
if not line:
return True
if linenum > 0 and line[0] not in '\t ':
return True
if _has_newline(line):
return True
if len(line.strip()) == 0:
return True
return False
|
Checks for bad headers i.e. newlines in subject, sender or recipients.
RFC5322 allows multiline CRLF with trailing whitespace (FWS) in headers
|
26,934 |
def annotation_rows(prefix, annotations):
ncol = len(annotations[])
return {name.replace(prefix, , 1) : values + [] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)}
|
Helper function to extract N: and C: rows from annotations and pad their values
|
26,935 |
def chapters(self, title):
r = requests.get("https://www.baka-tsuki.org/project/index.php?title={}".format(title.replace(" ", "_")),
headers=self.header)
if r.status_code != 200:
raise requests.HTTPError("Not Found")
else:
parsed = soup(r.text, )
dd = parsed.find_all("a")
volumes = []
for link in dd:
if in link.attrs:
if in link.get():
continue
if in link.attrs:
if re.search(self.chapter_regex, link.get()) is not None and not link.get().startswith():
volumes.append(link)
seplist = OrderedDict()
for item in volumes:
if in item.attrs:
result = re.search(self.separate_regex, item.get().lower())
else:
result = re.search(self.separate_regex, item.text.lower())
if result and result.groups():
if result.group().lstrip() in seplist:
seplist[result.group().lstrip()].append([item.get(),
item.get() if in item.attrs else item.text])
else:
seplist[result.group().lstrip()] = [[item.get(),
item.get() if in item.attrs else item.text]]
return seplist
|
Get a list of chapters for a visual novel. Keep in mind, this can be slow. I've certainly tried to make it as fast as possible, but it's still pulling text out of a webpage.
:param str title: The title of the novel you want chapters from
:return OrderedDict: An OrderedDict which contains the chapters found for the visual novel supplied
|
26,936 |
def handle_input(self):
difference = self.check_state()
if not difference:
return
self.events = []
self.handle_new_events(difference)
self.update_timeval()
self.events.append(self.sync_marker(self.timeval))
self.write_to_pipe(self.events)
|
Sends differences in the device state to the MicroBitPad
as events.
|
26,937 |
def to_safe_str(s):
|
converts some (tr) non-ascii chars to ascii counterparts,
then return the result as lowercase
|
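The code column above only preserved the signature; a hypothetical sketch of what such a helper could look like (the exact character mapping used by the original is an assumption) is:
# Assumed Turkish-to-ASCII mapping; the original function's table is not shown above.
_TR_MAP = str.maketrans('çÇğĞıİöÖşŞüÜ', 'cCgGiIoOsSuU')

def to_safe_str(s):
    # Map the (tr) characters to ASCII counterparts, then lowercase the result.
    return s.translate(_TR_MAP).lower()

assert to_safe_str('Şeker') == 'seker'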
26,938 |
def remove_label(self, to_remove):
if to_remove not in self.labels:
return
labels = self.labels
labels.remove(to_remove)
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \
as file_desc:
for label in labels:
file_desc.write("%s,%s\n" % (label.name,
label.get_color_str()))
|
Remove a label from the document. (-> rewrite the label file)
|
26,939 |
def jsd_df_to_2d(jsd_df):
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={: , : , 0: })
jsd_2d = jsd_2d.pivot(index=, columns=,
values=)
return jsd_2d + np.tril(jsd_2d.T, -1)
|
Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
|
26,940 |
def repositories(self):
if self.repo == "sbo":
self.sbo_case_insensitive()
self.find_pkg = sbo_search_pkg(self.name)
if self.find_pkg:
self.dependencies_list = Requires(self.flag).sbo(self.name)
else:
PACKAGES_TXT = Utils().read_file(
self.meta.lib_path + "{0}_repo/PACKAGES.TXT".format(self.repo))
self.names = Utils().package_name(PACKAGES_TXT)
self.bin_case_insensitive()
self.find_pkg = search_pkg(self.name, self.repo)
if self.find_pkg:
self.black = BlackList().packages(self.names, self.repo)
self.dependencies_list = Dependencies(
self.repo, self.black).binary(self.name, self.flag)
|
Get dependencies by repositories
|
26,941 |
def factor_schur(z, DPhival, G, A):
M, N = G.shape
P, N = A.shape
l = z[N+P:N+P+M]
s = z[N+P+M:]
SIG = diags(l/s, 0)
H = DPhival + mydot(G.T, mydot(SIG, G))
LU_H = myfactor(H)
HinvAt = mysolve(LU_H, A.T)
S = mydot(A, HinvAt)
LU_S = myfactor(S)
LU = (LU_S, LU_H)
return LU
|
Multiplier for inequality constraints
|
26,942 |
def configure_analytics_yandex(self, ident, params=None):
params = params or {}
data = {
: ,
: ident,
}
if params:
data[] = % params
self.analytics.append(data)
|
Configure Yandex Metrika analytics counter.
:param str|unicode ident: Metrika counter ID.
:param dict params: Additional params.
|
26,943 |
def getkeystroke(self, scr, vs=None):
k = None
try:
k = scr.get_wch()
self.drawRightStatus(scr, vs or self.sheets[0])
except curses.error:
return
if isinstance(k, str):
if ord(k) >= 32 and ord(k) != 127:
return k
k = ord(k)
return curses.keyname(k).decode()
|
Get keystroke and display it on status bar.
|
26,944 |
def render_it(self, *args, **kwargs):
kind = kwargs.get(, args[0])
num = kwargs.get(, args[1] if len(args) > 1 else 6)
with_tag = kwargs.get(, False)
glyph = kwargs.get(, )
all_cats = MPost.query_most(kind=kind, num=num).objects()
kwd = {
: with_tag,
: router_post[kind],
: glyph
}
return self.render_string(,
recs=all_cats,
kwd=kwd)
|
Render without userinfo.
fun(kind, num)
fun(kind, num, with_tag = val1)
fun(kind, num, with_tag = val1, glyph = val2)
|
26,945 |
def _make_class_unpicklable(cls):
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
|
Make the given class un-picklable.
|
26,946 |
def ASR(value, amount, width):
if amount == 0:
return value
result, _ = ASR_C(value, amount, width)
return result
|
The ARM ASR (arithmetic shift right) operation.
:param value: Value to shift
:type value: int or long or BitVec
:param int amount: How many bits to shift it.
:param int width: Width of the value
:return: Resultant value
:rtype int or BitVec
|
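For concrete integers the ASR semantics can be sketched directly (a hypothetical helper, ignoring the BitVec case handled by the original):
def asr_int(value, amount, width):
    # Arithmetic shift right on a width-bit two's-complement value.
    mask = (1 << width) - 1
    value &= mask
    if value & (1 << (width - 1)):      # negative: reinterpret with sign
        value -= 1 << width
    return (value >> amount) & mask

assert asr_int(0b10000000, 1, 8) == 0b11000000   # sign bit is replicated
assert asr_int(0b01000000, 1, 8) == 0b00100000   # non-negative values shift normally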
26,947 |
def generate_search_space(code_dir):
search_space = {}
if code_dir.endswith(slash):
code_dir = code_dir[:-1]
for subdir, _, files in os.walk(code_dir):
if subdir == code_dir:
package = ''
else:
assert subdir.startswith(code_dir + slash), subdir
prefix_len = len(code_dir) + 1
package = subdir[prefix_len:].replace(slash, '.') + '.'
for file_name in files:
if file_name.endswith('.py'):
path = os.path.join(subdir, file_name)
module = package + file_name[:-3]
search_space.update(_generate_file_search_space(path, module))
return search_space
|
Generate search space from Python source code.
Return a serializable search space object.
code_dir: directory path of source files (str)
|
26,948 |
def raw_rsa_private_crypt(private_key, data):
if _backend != :
raise SystemError()
if not hasattr(private_key, ) or not isinstance(private_key.asn1, PrivateKeyInfo):
raise TypeError(pretty_message(
,
type_name(private_key)
))
algo = private_key.asn1[][].native
if algo != :
raise ValueError(pretty_message(
,
algo.upper()
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
,
type_name(data)
))
rsa_private_key = private_key.asn1[].parsed
transformed_int = pow(
int_from_bytes(data),
rsa_private_key[].native,
rsa_private_key[].native
)
return int_to_bytes(transformed_int, width=private_key.asn1.byte_size)
|
Performs a raw RSA algorithm in a byte string using a private key.
This is a low-level primitive and is prone to disastrous results if used
incorrectly.
:param private_key:
An oscrypto.asymmetric.PrivateKey object
:param data:
A byte string of the plaintext to be signed or ciphertext to be
decrypted. Must be less than or equal to the length of the private key.
In the case of signing, padding must already be applied. In the case of
decryption, padding must be removed afterward.
:return:
A byte string of the transformed data
|
26,949 |
def initialize_simulants(self):
super().initialize_simulants()
self._initial_population = self.population.get_population(True)
|
Initialize this simulation's population. Should not be called
directly.
|
26,950 |
def adjust_container_limits_for_variadic_sequences(headerDir, containers, maxElements):
for container in containers:
headerFile = os.path.join( headerDir, "limits", container + ".hpp" )
regexMatch = r + container.upper() + r
regexReplace = r + re.escape( str(maxElements) )
for line in fileinput.input( headerFile, inplace=1, mode="rU" ):
line = re.sub(regexMatch, regexReplace, line.rstrip())
print(line)
|
Adjusts the limits of variadic sequence MPL-containers.
|
26,951 |
def volume_down(self):
try:
return bool(self.send_get_command(self._urls.command_volume_down))
except requests.exceptions.RequestException:
_LOGGER.error("Connection error: volume down command not sent.")
return False
|
Volume down receiver via HTTP get command.
|
26,952 |
def get_integer_value(self, label):
if self.has_integer_value(label):
return int(self.my_osid_object._my_map[][label])
raise IllegalState()
|
stub
|
26,953 |
def problem_glob(extension=):
filenames = glob.glob(.format(extension))
return [ProblemFile(file) for file in filenames]
|
Returns ProblemFile objects for all valid problem files
|
26,954 |
def graph_from_bbox(north, south, east, west, network_type='all_private',
simplify=True, retain_all=False, truncate_by_edge=False,
name='unnamed', timeout=180, memory=None,
max_query_area_size=50*1000*50*1000, clean_periphery=True,
infrastructure='way["highway"]', custom_filter=None):
if clean_periphery and simplify:
buffer_dist = 500
polygon = Polygon([(west, north), (west, south), (east, south), (east, north)])
polygon_utm, crs_utm = project_geometry(geometry=polygon)
polygon_proj_buff = polygon_utm.buffer(buffer_dist)
polygon_buff, _ = project_geometry(geometry=polygon_proj_buff, crs=crs_utm, to_latlong=True)
west_buffered, south_buffered, east_buffered, north_buffered = polygon_buff.bounds
response_jsons = osm_net_download(north=north_buffered, south=south_buffered,
east=east_buffered, west=west_buffered,
network_type=network_type, timeout=timeout,
memory=memory, max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
G_buffered = create_graph(response_jsons, name=name, retain_all=retain_all,
bidirectional=network_type in settings.bidirectional_network_types)
G = truncate_graph_bbox(G_buffered, north, south, east, west, retain_all=True, truncate_by_edge=truncate_by_edge)
G_buffered = simplify_graph(G_buffered)
G = truncate_graph_bbox(G_buffered, north, south, east, west, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
G.graph['streets_per_node'] = count_streets_per_node(G_buffered, nodes=G.nodes())
else:
response_jsons = osm_net_download(north=north, south=south, east=east,
west=west, network_type=network_type,
timeout=timeout, memory=memory,
max_query_area_size=max_query_area_size,
infrastructure=infrastructure, custom_filter=custom_filter)
G = create_graph(response_jsons, name=name, retain_all=retain_all,
bidirectional=network_type in settings.bidirectional_network_types)
G = truncate_graph_bbox(G, north, south, east, west, retain_all=retain_all, truncate_by_edge=truncate_by_edge)
return G
|
Create a networkx graph from OSM data within some bounding box.
Parameters
----------
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
network_type : string
what type of street network to get
simplify : bool
if true, simplify the graph topology
retain_all : bool
if True, return the entire graph even if it is not connected
truncate_by_edge : bool
if True retain node if it's outside bbox but at least one of node's
neighbors are within bbox
name : string
the name of the graph
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
max_query_area_size : float
max size for any part of the geometry, in square degrees: any polygon
bigger will get divided up for multiple queries to API
clean_periphery : bool
if True (and simplify=True), buffer 0.5km to get a graph larger than
requested, then simplify, then truncate it to requested spatial extent
infrastructure : string
download infrastructure of given type (default is streets (ie, 'way["highway"]') but other
infrastructures may be selected like power grids (ie, 'way["power"~"line"]'))
custom_filter : string
a custom network filter to be used instead of the network_type presets
Returns
-------
networkx multidigraph
|
26,955 |
def _register_process_with_cgrulesengd(pid):
from ctypes import cdll
try:
libcgroup = cdll.LoadLibrary('libcgroup.so.1')
failure = libcgroup.cgroup_init()
if failure:
pass
else:
CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1
failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN)
if failure:
pass
except OSError:
pass
|
Tell cgrulesengd daemon to not move the given process into other cgroups,
if libcgroup is available.
|
26,956 |
def is_overlapping_viewport(self, hotspot, xy):
l1, t1, r1, b1 = calc_bounds(xy, hotspot)
l2, t2, r2, b2 = calc_bounds(self._position, self._device)
return range_overlap(l1, r1, l2, r2) and range_overlap(t1, b1, t2, b2)
|
Checks to see if the hotspot at position ``(x, y)``
is (at least partially) visible according to the
position of the viewport.
|
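The range_overlap helper used above is not shown in this row; a minimal sketch of the assumed 1-D overlap test is:
def range_overlap(a_min, a_max, b_min, b_max):
    # Two closed ranges overlap when each starts before the other ends.
    return a_min <= b_max and b_min <= a_max

assert range_overlap(0, 10, 5, 15)
assert not range_overlap(0, 10, 11, 20)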
26,957 |
def _update(self):
self._grad.get_grad(self._x_old)
for i in range(self._prox_list.size):
z_temp = (2 * self._x_old - self._z[i] - self._gamma *
self._grad.grad)
z_prox = self._prox_list[i].op(z_temp, extra_factor=self._gamma /
self._weights[i])
self._z[i] += self._lambda_param * (z_prox - self._x_old)
self._x_new = np.sum([z_i * w_i for z_i, w_i in
zip(self._z, self._weights)], axis=0)
np.copyto(self._x_old, self._x_new)
self._update_param()
if self._cost_func:
self.converge = self._cost_func.get_cost(self._x_new)
|
r"""Update
This method updates the current reconstruction
Notes
-----
Implements algorithm 1 from [R2012]_
|
26,958 |
def setup(db_class, simple_object_cls, primary_keys):
table_name = simple_object_cls.__name__
column_names = simple_object_cls.FIELDS
metadata = MetaData()
table = Table(table_name, metadata,
*[Column(cname, _get_best_column_type(cname),
primary_key=cname in primary_keys)
for cname in column_names])
db_class.metadata = metadata
db_class.mapper_class = simple_object_cls
db_class.table = table
mapper(simple_object_cls, table)
|
A simple API to configure the metadata
|
26,959 |
def url(self):
local_path = self._find_in_local()
if local_path:
return local_path
if not self._url:
self._refresh_url()
elif time.time() > self._expired_at:
logger.info(.format(self))
self._refresh_url()
return self._url
|
We will always check if this song file exists in local library,
if true, we return the url of the local file.
.. note::
As the netease song url expires after a period of time,
we cannot use a static url here. Currently, we assume that the
expiration time is 20 minutes; after the url expires, it
will be automatically refreshed.
|
26,960 |
def dynamics_from_bundle(b, times, compute=None, return_euler=False, **kwargs):
b.run_delayed_constraints()
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
ltte = computeps.get_value(, check_visible=False, **kwargs)
times = np.array(times)
vgamma = b.get_value(, context=, unit=u.solRad/u.d)
t0 = b.get_value(, context=, unit=u.d)
hier = b.hierarchy
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits()
s = b.filter(context=)
periods, eccs, smas, t0_perpasses, per0s, long_ans, incls, dpdts, \
deccdts, dperdts, components = [],[],[],[],[],[],[],[],[],[],[]
for component in starrefs:
ancestororbits = []
comp = component
while hier.get_parent_of(comp) in orbitrefs:
comp = hier.get_parent_of(comp)
ancestororbits.append(comp)
periods.append([s.get_value(, u.d, component=orbit) for orbit in ancestororbits])
eccs.append([s.get_value(, component=orbit) for orbit in ancestororbits])
t0_perpasses.append([s.get_value(, u.d, component=orbit) for orbit in ancestororbits])
per0s.append([s.get_value(, u.rad, component=orbit) for orbit in ancestororbits])
long_ans.append([s.get_value(, u.rad, component=orbit) for orbit in ancestororbits])
incls.append([s.get_value(, u.rad, component=orbit) for orbit in ancestororbits])
dpdts.append([s.get_value(, u.d/u.d, component=orbit) for orbit in ancestororbits])
if conf.devel:
deccdts.append([s.get_value(, u.dimensionless_unscaled/u.d, component=orbit) for orbit in ancestororbits])
else:
deccdts.append([0.0 for orbit in ancestororbits])
dperdts.append([s.get_value(, u.rad/u.d, component=orbit) for orbit in ancestororbits])
smas.append(smas_this)
components.append([hier.get_primary_or_secondary(component=comp) for comp in [component]+ancestororbits[:-1]])
return dynamics(times, periods, eccs, smas, t0_perpasses, per0s, \
long_ans, incls, dpdts, deccdts, dperdts, \
components, t0, vgamma, \
mass_conservation=True, ltte=ltte, return_euler=return_euler)
|
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
return_euler: (bool, default=False) whether to include euler angles
in the return
Returns:
t, xs, ys, zs, vxs, vys, vzs [, theta, longan, incl].
t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
Euler angles (theta, longan, incl) are only returned if return_euler is
set to True.
|
26,961 |
def __create(self, client_id, client_secret, calls, **kwargs):
params = {
'client_id': client_id,
'client_secret': client_secret,
'calls': calls
}
return self.make_call(self.__create, params, kwargs)
|
Call documentation: `/batch/create
<https://www.wepay.com/developer/reference/batch#create>`_, plus extra
keyword parameter:
:keyword str access_token: will be used instead of instance's
``access_token``
|
26,962 |
def arg_file_is_new(parser, arg, mode='w'):
if os.path.exists(arg):
parser.error("\nThe file \"%s\"\nalready exists and "
"cannot be overwritten!" % arg)
else:
handler = open(arg, mode=mode)
return handler
|
Auxiliary function to give an error if the file already exists.
Parameters
----------
parser : parser object
Instance of argparse.ArgumentParser()
arg : string
File name.
mode : string
Optional string that specifies the mode in which the file is
opened.
Returns
-------
handler : file object
Open file handle.
|
26,963 |
def bounds_from_opts(
wkt_geometry=None, point=None, bounds=None, zoom=None, raw_conf=None
):
if wkt_geometry:
return wkt.loads(wkt_geometry).bounds
elif point:
x, y = point
zoom_levels = get_zoom_levels(
process_zoom_levels=raw_conf["zoom_levels"],
init_zoom_levels=zoom
)
tp = raw_conf_process_pyramid(raw_conf)
return tp.tile_from_xy(x, y, max(zoom_levels)).bounds
else:
return bounds
|
Loads the process pyramid of a raw configuration.
Parameters
----------
raw_conf : dict
Raw mapchete configuration as dictionary.
Returns
-------
BufferedTilePyramid
|
26,964 |
def drop_all(self, checkfirst: bool = True) -> None:
self.session.close()
self.base.metadata.drop_all(bind=self.engine, checkfirst=checkfirst)
|
Drop all data, tables, and databases for the PyBEL cache.
:param checkfirst: Check if the database exists before trying to drop it
|
26,965 |
def use_software_cache(sw_dir=None, reload_deps=False):
sw_dir = get_sw_dir(sw_dir)
if os.path.exists(sw_dir):
sys.path.insert(1, sw_dir)
if reload_deps:
reload_dependencies()
|
Adjusts ``sys.path`` so that the cached software at *sw_dir* is used. *sw_dir* is evaluated with
:py:func:`get_sw_dir`. When *reload_deps* is *True*, :py:func:`reload_dependencies` is invoked.
|
26,966 |
def fill(self, **kwargs):
setattr(self.obj, self.name, self.get(**kwargs))
|
Loads the relationships into this model. They are not loaded by
default
|
26,967 |
def get_ns2nts(results, fldnames=None, **kws):
ns2nts = cx.defaultdict(list)
nts = MgrNtGOEAs(results).get_goea_nts_all(fldnames, **kws)
for ntgoea in nts:
ns2nts[ntgoea.NS].append(ntgoea)
return ns2nts
|
Get namedtuples of GOEA results, split into BP, MF, CC.
|
26,968 |
def pad_dialogues(self, dialogues):
self.log(, )
return [self.pad_dialogue(d) for d in dialogues]
|
Pad the entire dataset.
This involves adding padding at the end of each sentence, and in the case of
a hierarchical model, it also involves adding padding at the end of each dialogue,
so that every training sample (dialogue) has the same dimension.
|
26,969 |
def dispatch(self, request, *args, **kwargs):
if in self.kwargs:
try:
self.invoices = Invoice.objects.filter(pk=self.kwargs.get())[:]
except ValueError:
raise Http404()
if not self.invoices:
raise Http404()
else:
ids = request.GET.get(, )
try:
self.invoices = Invoice.objects.filter(id__in=[x for x in ids.split()])[:]
except ValueError:
return HttpResponseBadRequest(_())
if not self.invoices:
return HttpResponseBadRequest(_())
toNotify = []
cannotNotify = []
for invoice in self.invoices:
if invoice.get_default_recipients():
toNotify.append(invoice)
else:
cannotNotify.append(invoice)
self.toNotify = toNotify
self.cannotNotify = cannotNotify
return super(InvoiceNotificationView, self).dispatch(request, *args, **kwargs)
|
Get the set of invoices for which to permit notifications
|
26,970 |
def show_front_page_groups(self, group_id):
path = {}
data = {}
params = {}
path["group_id"] = group_id
self.logger.debug("GET /api/v1/groups/{group_id}/front_page with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/groups/{group_id}/front_page".format(**path), data=data, params=params, single_item=True)
|
Show front page.
Retrieve the content of the front page
|
26,971 |
def createWcsHDU(self):
hdu = fits.ImageHDU()
hdu.header[] =
hdu.header[] = 1
hdu.header[] = (2, "number of World Coordinate System axes")
hdu.header[] = (self.naxis1, "Length of array axis 1")
hdu.header[] = (self.naxis2, "Length of array axis 2")
hdu.header[] = (0.0, "values of pixels in array")
excluded_keys = [,]
for key in self.wcskeys:
_dkey = self.wcstrans[key]
if _dkey not in excluded_keys:
hdu.header[key] = self.__dict__[_dkey]
return hdu
|
Generate a WCS header object that can be used to
populate a reference WCS HDU.
|
26,972 |
def iter_tiles(self, include_controller=True):
for address, tile in sorted(self._tiles.items()):
if address == 8 and not include_controller:
continue
yield address, tile
|
Iterate over all tiles in this device in order.
The ordering is by tile address which places the controller tile
first in the list.
Args:
include_controller (bool): Include the controller tile in the
results.
Yields:
int, EmulatedTile: A tuple with the tile address and tile object.
|
26,973 |
def _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_samp):
if fmt == '212':
byte_count = _required_byte_num('read', '212', n_samp)
element_count = byte_count
elif fmt in ['310', '311']:
byte_count = _required_byte_num('read', fmt, n_samp)
element_count = byte_count
else:
element_count = n_samp
byte_count = n_samp * BYTES_PER_SAMPLE[fmt]
if pb_dir is None:
with open(os.path.join(dir_name, file_name), 'rb') as fp:
fp.seek(start_byte)
sig_data = np.fromfile(fp, dtype=np.dtype(DATA_LOAD_TYPES[fmt]),
count=element_count)
else:
sig_data = download._stream_dat(file_name, pb_dir, byte_count,
start_byte,
np.dtype(DATA_LOAD_TYPES[fmt]))
return sig_data
|
Read data from a dat file, either local or remote, into a 1d numpy
array.
This is the lowest level dat reading function (along with
`_stream_dat` which this function may call), and is called by
`_rd_dat_signals`.
Parameters
----------
start_byte : int
The starting byte number to read from.
n_samp : int
The total number of samples to read. Does NOT need to create
whole blocks for special format. Any number of samples should be
readable.
* other params
See docstring for `_rd_dat_signals`
Returns
-------
sig_data : numpy array
The data read from the dat file. The dtype varies depending on
fmt. Byte aligned fmts are read in their final required format.
Unaligned formats are read as uint8 to be further processed.
Notes
-----
See docstring notes for `_rd_dat_signals`
|
26,974 |
def called_with(self, *args, **kwargs):
expected_call = Call(*args, **kwargs)
if expected_call in calls(self.spy):
return True
raise VerificationError(
"expected %s to be called with %s, but it wasn't" % (
self.spy, expected_call.formatted_args))
|
Return True if the spy was called with the specified args/kwargs.
Otherwise raise VerificationError.
|
26,975 |
def create_user(name, groups=None, key_file=None):
groups = groups or []
if not user_exists(name):
for group in groups:
if not group_exists(group):
sudo(u"addgroup %s" % group)
groups = groups and u % u.join(groups) or
sudo(u"useradd -m %s -s /bin/bash %s" % (groups, name))
sudo(u"passwd -d %s" % name)
if key_file:
sudo(u"mkdir -p /home/%s/.ssh" % name)
put(key_file, u"/home/%s/.ssh/authorized_keys" % name, use_sudo=True)
sudo(u"chown -R %(name)s:%(name)s /home/%(name)s/.ssh" % {: name})
|
Create a user. Adds a key file to authorized_keys if given.
|
26,976 |
def _script_load(script):
script = script.encode() if isinstance(script, six.text_type) else script
sha = [None, sha1(script).hexdigest()]
def call(conn, keys=[], args=[], force_eval=False):
keys = tuple(keys)
args = tuple(args)
if not force_eval:
if not sha[0]:
try:
return conn.execute_command(
'EVAL', script, len(keys), *(keys + args))
finally:
del sha[:-1]
try:
return conn.execute_command(
"EVALSHA", sha[0], len(keys), *(keys+args))
except redis.exceptions.ResponseError as msg:
if not any(msg.args[0].startswith(nsm) for nsm in NO_SCRIPT_MESSAGES):
raise
return conn.execute_command(
"EVAL", script, len(keys), *(keys+args))
return call
|
Borrowed/modified from my book, Redis in Action:
https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py
Used for Lua scripting support when writing against Redis 2.6+ to allow
for multiple unique columns per model.
|
26,977 |
def _fusion_range_to_dsl(tokens) -> FusionRangeBase:
if FUSION_MISSING in tokens:
return missing_fusion_range()
return fusion_range(
reference=tokens[FUSION_REFERENCE],
start=tokens[FUSION_START],
stop=tokens[FUSION_STOP]
)
|
Convert a PyParsing data dictionary into a PyBEL fusion range object.
:type tokens: ParseResult
|
26,978 |
def read_partial_map(filenames, column, fullsky=True, **kwargs):
kwargs[] = [] + np.atleast_1d(column).tolist()
filenames = np.atleast_1d(filenames)
header = fitsio.read_header(filenames[0],ext=kwargs.get(,1))
data = ugali.utils.fileio.load_files(filenames,**kwargs)
pix = data[]
value = data[column]
nside = header[]
npix = hp.nside2npix(nside)
ndupes = len(pix) - len(np.unique(pix))
if ndupes > 0:
msg = %(ndupes)
raise Exception(msg)
if fullsky and not np.isscalar(column):
raise Exception("Cannot make fullsky map from list of columns.")
if fullsky:
shape = list(value.shape)
shape[0] = npix
hpxmap = hp.UNSEEN * np.ones(shape,dtype=value.dtype)
hpxmap[pix] = value
return (nside,pix,hpxmap.T)
else:
return (nside,pix,value.T)
|
Read a partial HEALPix file(s) and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : partial or fullsky map
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky)
|
26,979 |
def clean(self, value):
if (
self.base_type is not None and
value is not None and
not isinstance(value, self.base_type)
):
if isinstance(self.base_type, tuple):
allowed_types = [typ.__name__ for typ in self.base_type]
allowed_types_text = .join(allowed_types)
else:
allowed_types_text = self.base_type.__name__
err_msg = % allowed_types_text
raise ValidationError(err_msg)
if not self.has_value(value):
if self.default is not None:
raise StopValidation(self.default)
if self.required:
raise ValidationError()
else:
raise StopValidation(self.blank_value)
return value
|
Take a dirty value and clean it.
|
26,980 |
def register_view(self, view):
super(SemanticDataEditorController, self).register_view(view)
if isinstance(self.model.state, LibraryState) or self.model.state.get_next_upper_library_root_state():
self.set_editor_lock(True)
view[].connect(, self.open_externally_clicked)
view[].connect(, self.on_add, False)
view[].connect(, self.on_add, True)
view[].connect(, self.on_remove)
self._apply_value_on_edited_and_focus_out(self.widget_columns[view.KEY_COLUMN_ID].get_cells()[0],
self.key_edited)
self._apply_value_on_edited_and_focus_out(self.widget_columns[view.VALUE_COLUMN_ID].get_cells()[0],
self.value_edited)
self.reload_tree_store_data()
|
Called when the View was registered
Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application
:param rafcon.gui.views.state_editor.semantic_data_editor.SemanticDataEditorView view: An view to show all
semantic data of a state
|
26,981 |
def disable_if_no_tty(cls):
if sys.stdout.isatty() or sys.stderr.isatty():
return False
cls.disable_all_colors()
return True
|
Disable all colors only if there is no TTY available.
:return: True if colors are disabled, False if stderr or stdout is a TTY.
:rtype: bool
|
26,982 |
def render_done(self, form, **kwargs):
if kwargs.get('step', None) != self.done_step_name:
return redirect(self.url_name, step=self.done_step_name)
return super(NamedUrlWizardView, self).render_done(form, **kwargs)
|
When rendering the done view, we have to redirect first (if the URL
name doesn't fit).
|
26,983 |
def bulk_write(self, requests, **kwargs):
self._arctic_lib.check_quota()
return self._collection.bulk_write(requests, **kwargs)
|
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write
Warning: this is wrapped in mongo_retry, and is therefore potentially unsafe if the write you want to execute
isn't idempotent.
|
26,984 |
def time_correlation_by_diagonalization(P, pi, obs1, obs2=None, time=1, rdl=None):
if rdl is None:
raise ValueError("no rdl decomposition")
R, D, L = rdl
d_times = np.diag(D) ** time
diag_inds = np.diag_indices_from(D)
D_time = np.zeros(D.shape, dtype=d_times.dtype)
D_time[diag_inds] = d_times
P_time = np.dot(np.dot(R, D_time), L)
l = np.multiply(obs1, pi)
m = np.dot(P_time, obs2)
result = np.dot(l, m)
return result
|
calculates time correlation. Raises P to power 'times' by diagonalization.
If rdl tuple (R, D, L) is given, it will be used for
further calculation.
|
26,985 |
def date_time_this_year():
now = datetime.now()
this_year_start = now.replace(
month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
this_year_days = sum(calendar.mdays)
random_seconds = random.randint(0, this_year_days*A_DAY_SECONDS)
return this_year_start + timedelta(seconds=random_seconds)
|
Get a random datetime within the current year
:return:
* date_this_year: (datetime) a random datetime within the current year
Example::
print('--- GetRandomTime.date_time_this_year demo ---')
print(GetRandomTime.date_time_this_year())
print('---')
Output::
--- GetRandomTime.date_time_this_year demo ---
2018-02-08 17:16:09
---
|
26,986 |
def QA_SU_save_future_day(engine, client=DATABASE):
engine = select_save_engine(engine)
engine.QA_SU_save_future_day(client=client)
|
save future_day
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
|
26,987 |
def calculate_job_input_hash(job_spec, workflow_json):
if in job_spec:
del job_spec[]
job_md5_buffer = md5()
job_md5_buffer.update(json.dumps(job_spec).encode())
job_md5_buffer.update(json.dumps(workflow_json).encode())
return job_md5_buffer.hexdigest()
|
Calculate md5 hash of job specification and workflow json.
|
26,988 |
def get_unique_backends():
backends = IBMQ.backends()
unique_hardware_backends = []
unique_names = []
for back in backends:
if back.name() not in unique_names and not back.configuration().simulator:
unique_hardware_backends.append(back)
unique_names.append(back.name())
if not unique_hardware_backends:
raise QiskitError('No backends available.')
return unique_hardware_backends
|
Gets the unique backends that are available.
Returns:
list: Unique available backends.
Raises:
QiskitError: No backends available.
|
26,989 |
def xbm(self, scale=1, quiet_zone=4):
return builder._xbm(self.code, scale, quiet_zone)
|
Returns a string representing an XBM image of the QR code.
The XBM format is a black and white image format that looks like a
C header file.
Because displaying QR codes in Tkinter is the
primary use case for this renderer, this method does not take a file
parameter. Instead it returns the rendered QR code data as a string.
Example of using this renderer with Tkinter:
>>> import pyqrcode
>>> import tkinter
>>> code = pyqrcode.create('Knights who say ni!')
>>> code_xbm = code.xbm(scale=5)
>>>
>>> top = tkinter.Tk()
>>> code_bmp = tkinter.BitmapImage(data=code_xbm)
>>> code_bmp.config(foreground="black")
>>> code_bmp.config(background="white")
>>> label = tkinter.Label(image=code_bmp)
>>> label.pack()
The *scale* parameter sets how large to draw a single module. By
default one pixel is used to draw a single module. This may make the
code too small to be read efficiently. Increasing the scale will make
the code larger. Only integer scales are usable. This method will
attempt to coerce the parameter into an integer (e.g. 2.5 will become 2,
and '3' will become 3). You can use the :py:meth:`get_png_size` method
to calculate the actual pixel size of this image when displayed.
The *quiet_zone* parameter sets how wide the quiet zone around the code
should be. According to the standard this should be 4 modules. It is
left settable because such a wide quiet zone is unnecessary in many
applications where the QR code is not being printed.
|
26,990 |
def BGPNeighborPrefixExceeded_originator_switch_info_switchIpV4Address(self, **kwargs):
config = ET.Element("config")
BGPNeighborPrefixExceeded = ET.SubElement(config, "BGPNeighborPrefixExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(BGPNeighborPrefixExceeded, "originator-switch-info")
switchIpV4Address = ET.SubElement(originator_switch_info, "switchIpV4Address")
switchIpV4Address.text = kwargs.pop('switchIpV4Address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
26,991 |
def square_batch_region(data, region, bam_files, vrn_files, out_file):
from bcbio.variation import sentieon, strelka2
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data]
|
Perform squaring of a batch in a supplied region, with input BAMs
|
26,992 |
def run_sparql_on(q, ontology):
logging.info("Connecting to " + ontology.value + " SPARQL endpoint...")
sparql = SPARQLWrapper(ontology.value)
logging.info("Made wrapper: {}".format(sparql))
sparql.setQuery(q)
sparql.setReturnFormat(JSON)
logging.info("Query: {}".format(q))
results = sparql.query().convert()
bindings = results['results']['bindings']
logging.info("Rows: {}".format(len(bindings)))
for r in bindings:
curiefy(r)
return bindings
|
Run a SPARQL query (q) on a given Ontology (Enum EOntology)
|
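A minimal SPARQLWrapper usage sketch along the same lines; the endpoint and query are illustrative and running it needs network access:

from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://query.wikidata.org/sparql")  # example endpoint
sparql.setQuery("""
    SELECT ?item ?itemLabel WHERE {
      ?item wdt:P31 wd:Q146 .
      SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
    } LIMIT 3
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for row in results['results']['bindings']:
    print(row['item']['value'])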
26,993 |
def delete_namespaced_role_binding(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_role_binding_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_role_binding_with_http_info(name, namespace, **kwargs)
return data
|
delete a RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_role_binding(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
|
26,994 |
def returner(ret):
    _options = _get_options(ret)
    chat_id = _options.get('chat_id')
    token = _options.get('token')
    if not chat_id:
        log.error('telegram.chat_id not defined in salt config')
    if not token:
        log.error('telegram.token not defined in salt config')
    returns = ret.get('return')
    message = ('id: {0}\r\n'
               'function: {1}\r\n'
               'function args: {2}\r\n'
               'jid: {3}\r\n'
               'return: {4}\r\n').format(
        ret.get('id'),
        ret.get('fun'),
        ret.get('fun_args'),
        ret.get('jid'),
        returns)
    return __salt__['telegram.post_message'](message,
                                             chat_id=chat_id,
                                             token=token)
|
Send a Telegram message with the data.
:param ret: The data to be sent.
:return: Boolean if message was sent successfully.
|
26,995 |
def _set_dhcpd(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=dhcpd.dhcpd, is_container=, presence=False, yang_name="dhcpd", rest_name="dhcpd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__dhcpd = t
if hasattr(self, ):
self._set()
|
Setter method for dhcpd, mapped from YANG variable /rbridge_id/dhcpd (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_dhcpd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dhcpd() directly.
|
26,996 |
def rowget(self, tables_dict, row_list, index):
    "row_list in self.row_order"
    tmp = row_list
    for i in self.index_tuple(tables_dict, index, False):
        tmp = tmp[i]
    return tmp
|
row_list in self.row_order
|
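The method above just walks a nested list with a tuple of indices. A standalone equivalent of that traversal:

def nested_get(row_list, index_tuple):
    tmp = row_list
    for i in index_tuple:
        tmp = tmp[i]
    return tmp

rows = [['a', 'b'], [['c', 'd'], ['e', 'f']]]
print(nested_get(rows, (1, 0, 1)))  # -> 'd'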
26,997 |
def fmt_val(val, shorten=True):
val = repr(val)
max = 50
if shorten:
if len(val) > max:
close = val[-1]
val = val[0:max-4] + "..."
if close in (">", ""]})'):
val = val + close
return val
|
Format a value for inclusion in an
informative text string.
|
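The expected behaviour of the formatter above, replayed inline on a long list: the repr is cut at 50 characters and the trailing closing delimiter is kept. The closer set matches the reconstruction of the garbled tuple above, so treat it as an assumption:

long_list = list(range(40))
text = repr(long_list)                 # well over 50 characters
if len(text) > 50:
    close = text[-1]                   # ']'
    text = text[:50 - 4] + "..."
    if close in (">", "'", '"', "]", "}", ")"):
        text += close
print(text)  # '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,...]'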
26,998 |
def rebase_all_branches(self):
    col_width = max(len(b.name) for b in self.branches) + 1
    if self.repo.head.is_detached:
        raise GitError("You're not currently on a branch. I'm exiting"
                       " in case you're in the middle of something.")
    original_branch = self.repo.active_branch
    for branch in self.branches:
        target = self.target_map[branch.name]
        # Print the branch name, bold for the currently checked-out branch
        attrs = ['bold'] if branch.name == original_branch.name else []
        print(colored(branch.name.ljust(col_width), attrs=attrs), end=' ')
        # Check that the target ('./local' or remote) branch still exists
        try:
            if target.name.startswith('./'):
                self.git.rev_parse(target.name[2:])
            else:
                _ = target.commit
        except (ValueError, GitError):
            print(colored("error: remote branch doesn't exist!", 'red'))
            self.states.append("remote branch doesn't exist")
            continue
        if target.is_local:
            target = find(self.repo.branches,
                          lambda b: b.name == target.name[2:])
        if target.commit.hexsha == branch.commit.hexsha:
            print(colored('up to date', 'green'))
            self.states.append('up to date')
            continue
        base = self.git.merge_base(branch.name, target.name)
        if base == target.commit.hexsha:
            print(colored('ahead of upstream', 'cyan'))
            self.states.append('ahead')
            continue
        fast_fastforward = False
        if base == branch.commit.hexsha:
            print(colored('fast-forwarding...', 'yellow'), end=' ')
            self.states.append('fast-forwarding')
    original_branch.checkout()
|
Rebase all branches, if possible.
|
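A rough GitPython sketch of the branch classification used above (up to date / ahead / fast-forwardable / diverged); it assumes a checked-out branch whose upstream lives under origin with the same name:

import git  # GitPython

repo = git.Repo('.')                   # repository path is a placeholder
branch = repo.active_branch
upstream = 'origin/' + branch.name     # assumed upstream naming

target = repo.commit(upstream)
base = repo.git.merge_base(branch.name, upstream)

if target.hexsha == branch.commit.hexsha:
    state = 'up to date'
elif base == target.hexsha:
    state = 'ahead of upstream'    # only local commits are new
elif base == branch.commit.hexsha:
    state = 'fast-forwardable'     # only the upstream moved
else:
    state = 'diverged'             # both sides have new commits
print(state)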
26,999 |
def select_logfile(self, logfile):
data = + logfile
r = self._basic_post(url=, data=data)
return r.json()
|
Parameters
----------
logfile : str
Returns
-------
dict
|