Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
379,700 | def compute_exit_code(config, exception=None):
code = 0
if exception is not None:
code = code | 1
if config.surviving_mutants > 0:
code = code | 2
if config.surviving_mutants_timeout > 0:
code = code | 4
if config.suspicious_mutants > 0:
code = code | 8
return code | Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int |
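Because the statuses are bit-ORed together, a caller can recover every condition from a single return value. A minimal decoding sketch, worked out from the listing above rather than taken from mutmut itself:

```python
def decode_exit_code(code):
    """Map a bit-ORed mutmut exit code back to the individual conditions."""
    flags = {1: "fatal error", 2: "surviving mutants",
             4: "timed-out mutants", 8: "suspicious mutants"}
    return [name for bit, name in flags.items() if code & bit]

# A run with surviving (2) and suspicious (8) mutants yields exit code 10:
assert decode_exit_code(10) == ["surviving mutants", "suspicious mutants"]
```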
379,701 | def _get_nd_basic_indexing(self, key):
shape = self.shape
if isinstance(key, integer_types):
if key > shape[0] - 1:
            raise IndexError(
                'index {} is out of bounds for axis 0 with size {}'.format(
                    key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is not None and key.step != 1:
if key.step == 0:
raise ValueError("slice step cannot be zero")
return op.slice(self, begin=(key.start,), end=(key.stop,), step=(key.step,))
elif key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
if not isinstance(key, tuple):
        raise ValueError(
            'basic indexing does not support key=%s of type %s'
            % (str(key), str(type(key))))
    assert len(key) != 0, 'basic indexing requires a non-empty key'
begin = []
end = []
step = []
kept_axes = []
i = -1
for i, slice_i in enumerate(key):
if isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1 if slice_i != -1 else self.shape[i])
step.append(1)
elif isinstance(slice_i, py_slice):
if slice_i.step == 0:
                raise ValueError(
                    'slice step cannot be zero, received key=%s, slice=%s'
                    % (str(key), str(slice_i)))
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
kept_axes.append(i)
else:
            raise ValueError(
                'basic indexing does not support index=%s of type %s'
                % (str(slice_i), str(type(slice_i))))
kept_axes.extend(range(i+1, len(shape)))
sliced_nd = op.slice(self, begin, end, step)
if len(kept_axes) == len(shape):
return sliced_nd
oshape = []
sliced_shape = sliced_nd.shape
for axis in kept_axes:
oshape.append(sliced_shape[axis])
if len(oshape) == 0:
oshape.append(1)
oshape = tuple(oshape)
    assert np.prod(oshape) == np.prod(sliced_shape), \
        'output shape %s does not match the shape of the sliced array %s' \
        % (oshape, sliced_shape)
return sliced_nd.reshape(oshape) | This function is called when key is a slice, or an integer,
or a tuple of slices or integers |
379,702 | def push_session(document, session_id=None, url='default', io_loop=None):
coords = _SessionCoordinates(session_id=session_id, url=url)
session = ClientSession(session_id=coords.session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop)
session.push(document)
return session | Create a session by pushing the given document to the server,
overwriting any existing server-side document.
``session.document`` in the returned session will be your supplied
document. While the connection to the server is open, changes made on the
server side will be applied to this document, and changes made on the
client side will be synced to the server.
In a production scenario, the ``session_id`` should be unique for each
browser tab, which keeps users from stomping on each other. It's neither
scalable nor secure to use predictable session IDs or to share session
IDs across users.
For a notebook running on a single machine, ``session_id`` could be
something human-readable such as ``"default"`` for convenience.
If you allow ``push_session()`` to generate a unique ``session_id``, you
can obtain the generated ID with the ``id`` property on the returned
``ClientSession``.
Args:
document : (bokeh.document.Document)
The document to be pushed and set as session.document
session_id : (string, optional)
The name of the session, None to autogenerate a random one (default: None)
url : (str, optional): The URL to a Bokeh application on a Bokeh server
can also be `"default"` which will connect to the default app URL
io_loop : (tornado.ioloop.IOLoop, optional)
The IOLoop to use for the websocket
Returns:
ClientSession
A new ClientSession connected to the server |
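A typical usage sketch, assuming a Bokeh server is already running at the default URL; the helper names below come from the Bokeh client API, not from this dataset row:

```python
# Hedged sketch: requires a running `bokeh serve` instance to actually connect.
from bokeh.io import curdoc
from bokeh.client import push_session

session = push_session(curdoc())   # session_id is auto-generated
print("session id:", session.id)   # retrieve the generated id
session.show()                     # open the session document in a browser tab
```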
379,703 | def repr_type(obj):
the_type = type(obj)
if (not py3compat.PY3) and the_type is InstanceType:
the_type = obj.__class__
    msg = '%r %r' % (obj, the_type)
return msg | Return a string representation of a value and its type for readable
error messages. |
379,704 | def detect_types(
field_names,
field_values,
field_types=DEFAULT_TYPES,
skip_indexes=None,
type_detector=TypeDetector,
fallback_type=TextField,
*args,
**kwargs
):
detector = type_detector(
field_names,
field_types=field_types,
fallback_type=fallback_type,
skip_indexes=skip_indexes,
)
detector.feed(field_values)
return detector.fields | Detect column types (or "where the magic happens") |
379,705 | def main(args):
(args, opts) = _surface_to_ribbon_parser(args)
if opts[]:
print(info, file=sys.stdout)
return 1
verbose = opts[]
def note(s):
if verbose: print(s, file=sys.stdout)
return verbose
if in opts and opts[] is not None:
add_subject_path(opts[])
(lhfl, rhfl) = (opts[], opts[])
if len(args) == 0:
raise ValueError()
elif len(args) == 1:
sub = find_subject_path(os.getenv())
outfl = args[0]
elif len(args) == 2:
sbpth = find_subject_path(args[0])
if sbpth is not None:
sub = sbpth
else:
sub = find_subject_path(os.getenv())
if lhfl is not None: rhfl = args[0]
elif rhfl is not None: lhfl = args[0]
else: raise ValueError( % args[0])
outfl = args[1]
elif len(args) == 3:
sbpth0 = find_subject_path(args[0])
sbpth1 = find_subject_path(args[1])
if sbpth0 is not None:
sub = sbpth0
if lhfl is not None: rhfl = args[1]
elif rhfl is not None: lhfl = args[1]
else: raise ValueError( % args[1])
elif sbpth1 is not None:
sub = sbpth1
if lhfl is not None: rhfl = args[0]
elif rhfl is not None: lhfl = args[0]
else: raise ValueError( % args[0])
else:
sub = find_subject_path(os.getenv())
if lhfl is not None or rhfl is not None:
raise ValueError()
(lhfl, rhfl) = args
outfl = args[2]
elif len(args) == 4:
if lhfl is not None or rhfl is not None:
raise ValueError()
subidx = next((i for (i,a) in enumerate(args) if find_subject_path(a) is not None), None)
if subidx is None: raise ValueError()
sub = find_subject_path(args[subidx])
del args[subidx]
(lhfl, rhfl, outfl) = args
else:
raise ValueError()
if sub is None: raise ValueError()
if lhfl is None and rhfl is None: raise ValueError()
method = opts[].lower()
if method not in [, , , ]:
raise ValueError( % method)
if opts[] is None: dtyp = None
elif opts[].lower() == : dtyp = np.float32
elif opts[].lower() == : dtyp = np.int32
else: raise ValueError()
if method == :
if dtyp is np.float32: method =
elif dtyp is np.int32: method =
else: method =
note()
(lhdat, rhdat) = (None, None)
if lhfl is not None:
note( % lhfl)
lhdat = read_surf_file(lhfl)
if rhfl is not None:
note( % rhfl)
rhdat = read_surf_file(rhfl)
(dat, hemi) = (rhdat, ) if lhdat is None else \
(lhdat, ) if rhdat is None else \
((lhdat, rhdat), None)
sub = subject(sub)
note()
vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts[], dtype=dtyp)
note( % outfl)
save(outfl, vol, affine=sub.voxel_to_native_matrix)
note()
    return 0 | surface_to_ribbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these
arguments may include any options and must include exactly one subject id and one output
filename. Additionally one or two surface input filenames must be given. The surface files are
projected into the ribbon and written to the output filename. For more information see the
string stored in surface_to_image.info. |
379,706 | async def create_source_event_stream(
schema: GraphQLSchema,
document: DocumentNode,
root_value: Any = None,
context_value: Any = None,
variable_values: Dict[str, Any] = None,
operation_name: str = None,
field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
assert_valid_execution_arguments(schema, document, variable_values)
context = ExecutionContext.build(
schema,
document,
root_value,
context_value,
variable_values,
operation_name,
field_resolver,
)
if isinstance(context, list):
return ExecutionResult(data=None, errors=context)
type_ = get_operation_root_type(schema, context.operation)
fields = context.collect_fields(type_, context.operation.selection_set, {}, set())
response_names = list(fields)
response_name = response_names[0]
field_nodes = fields[response_name]
field_node = field_nodes[0]
field_name = field_node.name.value
field_def = get_field_def(schema, type_, field_name)
if not field_def:
raise GraphQLError(
            f"The subscription field '{field_name}' is not defined.", field_nodes
)
resolve_fn = field_def.subscribe or context.field_resolver
resolve_fn = cast(GraphQLFieldResolver, resolve_fn)
path = add_path(None, response_name)
info = context.build_resolve_info(field_def, field_nodes, type_, path)
result = context.resolve_field_value_or_error(
field_def, field_nodes, resolve_fn, root_value, info
)
event_stream = await cast(Awaitable, result) if isawaitable(result) else result
if isinstance(event_stream, Exception):
raise located_error(event_stream, field_nodes, response_path_as_list(path))
if isinstance(event_stream, AsyncIterable):
return cast(AsyncIterable, event_stream)
raise TypeError(
f"Subscription field must return AsyncIterable. Received: {event_stream!r}"
    ) | Create source event stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec. |
379,707 | def upload_slice_file(self, real_file_path, slice_size, file_name, offset=0, dir_name=None):
    if dir_name is not None and dir_name[0] == '/':
dir_name = dir_name[1:len(dir_name)]
if dir_name is None:
dir_name = ""
self.url = + self.config.region + + str(
self.config.app_id) + + self.config.bucket
if dir_name is not None:
self.url = self.url + + dir_name
self.url = self.url + + file_name
file_size = os.path.getsize(real_file_path)
session = self._upload_slice_control(file_size=file_size, slice_size=slice_size)
    with open(real_file_path, 'rb') as local_file:
while offset < file_size:
file_content = local_file.read(slice_size)
self._upload_slice_data(filecontent=file_content, session=session, offset=offset)
offset += slice_size
r = self._upload_slice_finish(session=session, file_size=file_size)
    return r | This slice-upload code was kindly contributed by GitHub user a270443177 (https://github.com/a270443177)
:param real_file_path:
:param slice_size:
:param file_name:
:param offset:
:param dir_name:
:return: |
379,708 | def do_loop(self, params):
repeat = params.repeat
if repeat < 0:
self.show_output("<repeat> must be >= 0.")
return
pause = params.pause
if pause < 0:
self.show_output("<pause> must be >= 0.")
return
cmds = params.cmds
i = 0
with self.transitions_disabled():
while True:
for cmd in cmds:
try:
self.onecmd(cmd)
except Exception as ex:
self.show_output("Command failed: %s.", ex)
if pause > 0.0:
time.sleep(pause)
i += 1
if repeat > 0 and i >= repeat:
break | \x1b[1mNAME\x1b[0m
loop - Runs commands in a loop
\x1b[1mSYNOPSIS\x1b[0m
loop <repeat> <pause> <cmd1> <cmd2> ... <cmdN>
\x1b[1mDESCRIPTION\x1b[0m
Runs <cmds> <repeat> times (0 means forever), with a pause of <pause> secs in between
each <cmd> (0 means no pause).
\x1b[1mEXAMPLES\x1b[0m
> loop 3 0 "get /foo"
...
> loop 3 0 "get /foo" "get /bar"
... |
379,709 | def add_artwork_item(self, instance, item):
if in self.old_top[instance][item]:
pass
else:
(item_type, item_id) = item.split()
self.artwork[item_type][item_id] = {}
for s_item in sorted(self.old_top[instance][item]):
if self.old_top[instance][item][s_item] is not None:
s_detail = self.old_top[instance][item][s_item]
s_type = type(s_detail)
if item_type == and s_type == str:
s_detail = s_detail.replace(, )
if s_type == str and len(s_detail) > 1 \
and s_detail[0] == and s_detail[-1] == :
s_detail = s_detail[1:-1]
if item_type == and s_item == :
s_item =
elif s_item == :
s_item =
s_detail = float(s_detail)
self.artwork[item_type][item_id][s_item] = s_detail
if item_type == and \
not in self.artwork[item_type][item_id]:
self.artwork[item_type][item_id][] =
self.artwork[item_type][item_id][] = 0 | Add an artwork item e.g. Shapes, Notes and Pixmaps
:param instance: Hypervisor instance
:param item: Item to add |
379,710 | def load_user(user_email):
user_obj = store.user(user_email)
user_inst = LoginUser(user_obj) if user_obj else None
return user_inst | Returns the currently active user as an object. |
379,711 | def setEnabled(self, state):
super(XToolButton, self).setEnabled(state)
self.updateUi() | Updates the drop shadow effect for this widget on enable/disable
state change.
:param state | <bool> |
379,712 | def get_all_json_from_indexq(self):
files = self.get_all_as_list()
out = []
for efile in files:
out.extend(self._open_file(efile))
return out | Gets all data from the todo files in indexq and returns one huge list of all data. |
379,713 | def create_new_output_file(sampler, filename, force=False, injection_file=None,
**kwargs):
if os.path.exists(filename):
if force:
os.remove(filename)
else:
raise OSError("output-file already exists; use force if you "
"wish to overwrite it.")
logging.info("Creating file {}".format(filename))
with sampler.io(filename, "w") as fp:
fp.create_group(fp.samples_group)
fp.create_group(fp.sampler_group)
fp.write_sampler_metadata(sampler)
if injection_file is not None:
logging.info("Writing injection file to output")
fp.write_injections(injection_file) | Creates a new output file.
If the output file already exists, an ``OSError`` will be raised. This can
be overridden by setting ``force`` to ``True``.
Parameters
----------
sampler : sampler instance
Sampler
filename : str
Name of the file to create.
force : bool, optional
Create the file even if it already exists. Default is False.
injection_file : str, optional
If an injection was added to the data, write its information.
\**kwargs :
All other keyword arguments are passed through to the file's
``write_metadata`` function. |
379,714 | def _get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
if ssl_version not in (PROTOCOL_DTLS, PROTOCOL_DTLSv1, PROTOCOL_DTLSv1_2):
return _orig_get_server_certificate(addr, ssl_version, ca_certs)
if ca_certs is not None:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
af = getaddrinfo(addr[0], addr[1])[0][0]
s = ssl.wrap_socket(socket(af, SOCK_DGRAM),
ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return ssl.DER_cert_to_PEM_cert(dercert) | Retrieve a server certificate
Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt. |
379,715 | def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
prec, rec = calc_detection_voc_prec_rec(
pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {"ap": ap, "map": np.nanmean(ap)} | Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results |
379,716 | def metarate(self, func, name='values'):
setattr(func, name, self.values)
return func | Set the values object to the function object's namespace |
379,717 | def get_snapshots(topology):
snapshots = []
snap_dir = os.path.join(topology_dirname(topology), )
if os.path.exists(snap_dir):
snaps = os.listdir(snap_dir)
for directory in snaps:
snap_top = os.path.join(snap_dir, directory, )
if os.path.exists(snap_top):
snapshots.append({: snap_top,
: True})
return snapshots | Return the paths of any snapshot topologies
:param str topology: topology file
:return: list of dicts containing snapshot topologies
:rtype: list |
379,718 | def find_version_by_string_lib(line):
if not line:
return None
simplified_line = simplify_line(line)
version = None
if simplified_line.startswith("version="):
if not in simplified_line:
pass
else:
if "=" in simplified_line:
post_equals = simplified_line.split("=")[0]
if in post_equals:
parts = post_equals.split()
if len(parts) != 3:
version = parts[0]
return version | No regex parsing. Or at least, mostly, not regex. |
379,719 | def on_delete(self, forced):
if not forced and self.handler is not None and not self.is_closed:
self.promote()
else:
self.close() | Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False. |
379,720 | def ReadPreprocessingInformation(self, knowledge_base):
generator = self._GetAttributeContainers(
self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)
for stream_number, system_configuration in enumerate(generator):
knowledge_base.ReadSystemConfigurationArtifact(
system_configuration, session_identifier=stream_number) | Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information. |
379,721 | def _item_to_metric(iterator, log_metric_pb):
resource = MessageToDict(log_metric_pb)
return Metric.from_api_repr(resource, iterator.client) | Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page. |
379,722 | def _checkDimensionsListLike(arrays):
dim1 = len(arrays)
dim2, dim3 = arrays[0].shape
for aa in range(1, dim1):
dim2_aa, dim3_aa = arrays[aa].shape
if (dim2_aa != dim2) or (dim3_aa != dim3):
raise _error.InvalidError(_MDPERR["obj_square"])
return dim1, dim2, dim3 | Check that each array in a list of arrays has the same size. |
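For illustration, a hypothetical call with two equally shaped NumPy arrays (assuming the surrounding module's imports) behaves as the comments describe:

```python
import numpy as np

# Two 2x3 transition matrices stacked in a list:
arrays = [np.zeros((2, 3)), np.ones((2, 3))]
# _checkDimensionsListLike(arrays) would return (2, 2, 3); a list whose members
# have mismatched shapes raises the module's InvalidError instead.
```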
379,723 | def get_import_stacklevel(import_hook):
py_version = sys.version_info[:2]
if py_version <= (3, 2):
return 4 if import_hook else 2
elif py_version == (3, 3):
return 8 if import_hook else 10
elif py_version == (3, 4):
return 10 if import_hook else 8
else:
return 4 if import_hook else 2 | Returns the stacklevel value for warnings.warn() for when the warning
gets emitted by an imported module, but the warning should point at the
code doing the import.
Pass import_hook=True if the warning gets generated by an import hook
(warn() gets called in load_module(), see PEP302) |
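A small sketch of how such a stacklevel is typically consumed; the toy loader below is illustrative and not taken from the row above:

```python
import warnings

def load_module(name):
    # Point the warning at the caller of load_module rather than at this frame;
    # a real import hook would instead pass get_import_stacklevel(import_hook=True).
    warnings.warn(f"{name} is deprecated", DeprecationWarning,
                  stacklevel=2)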
379,724 | def iniedited(self, *args, **kwargs):
self.inimodel.set_index_edited(self.files_lv.currentIndex(), True) | Set the current index of inimodel to modified
:returns: None
:rtype: None
:raises: None |
379,725 | def call_handlers(self, msg):
self.message_received.emit(msg)
    msg_type = msg['header']['msg_type']
    signal = getattr(self, msg_type + '_received', None)
    if signal:
        signal.emit(msg)
    elif msg_type in ('stdout', 'stderr'):
self.stream_received.emit(msg) | Reimplemented to emit signals instead of making callbacks. |
379,726 | def check_ab(ab, verb):
try:
ab = int(ab)
except VariableCatch:
print()
raise
pab = [11, 12, 13, 14, 15, 16, 21, 22, 23, 24, 25, 26,
31, 32, 33, 34, 35, 36, 41, 42, 43, 44, 45, 46,
51, 52, 53, 54, 55, 56, 61, 62, 63, 64, 65, 66]
if ab not in pab:
        print('* ERROR   :: <ab> must be one of: ' + str(pab) +
              '; <ab> provided: ' + str(ab))
        raise ValueError('ab')
if verb > 2:
print(" Input ab : ", ab)
msrc = ab % 10 > 3
mrec = ab // 10 > 3
if mrec:
if msrc:
ab_calc = ab - 33
else:
ab_calc = ab % 10*10 + ab // 10
else:
ab_calc = ab
if verb > 2:
if ab in [36, 63]:
print("\n> <ab> IS "+str(ab)+" WHICH IS ZERO; returning")
else:
print(" Calculated ab : ", ab_calc)
    return ab_calc, msrc, mrec | Check source-receiver configuration.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
ab : int
Source-receiver configuration.
verb : {0, 1, 2, 3, 4}
Level of verbosity.
Returns
-------
ab_calc : int
Adjusted source-receiver configuration using reciprocity.
msrc, mrec : bool
If True, src/rec is magnetic; if False, src/rec is electric. |
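Following the code above, an electric source with a magnetic receiver is mapped onto its reciprocal configuration; a worked example traced by hand from the listing:

```python
# ab = 41: the receiver is magnetic (41 // 10 > 3) and the source electric
# (41 % 10 <= 3), so reciprocity swaps the digits: ab_calc = 1*10 + 4 = 14.
ab_calc, msrc, mrec = check_ab(41, verb=0)
assert (ab_calc, msrc, mrec) == (14, False, True)
```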
379,727 | def system_monitor_mail_relay_domain_name(self, **kwargs):
config = ET.Element("config")
system_monitor_mail = ET.SubElement(config, "system-monitor-mail", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
relay = ET.SubElement(system_monitor_mail, "relay")
host_ip_key = ET.SubElement(relay, "host-ip")
    host_ip_key.text = kwargs.pop('host_ip')
domain_name = ET.SubElement(relay, "domain-name")
    domain_name.text = kwargs.pop('domain_name')
    callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
379,728 | def initialize_tasks(self):
self.tasks = chain(self.iterable, [POISON_PILL] * self.num_processes)
for task in islice(self.tasks, Q_MAX_SIZE):
        log.debug('Enqueueing task: %r', task)
self.task_queue.put(task) | Load the input queue to capacity.
Overfilling causes a deadlock when `queue.put` blocks when
full, so further tasks are enqueued as results are returned. |
379,729 | def _AssertDataIsList(key, lst):
if not isinstance(lst, list) and not isinstance(lst, tuple):
        raise NotAListError('key %s does not contain a list or tuple' % key)
for element in lst:
if not isinstance(element, str):
            raise ElementNotAStringError('non-string element %r in list %r' %
                                         (element, lst))
379,730 | def get_config(repo):
files = get_files(repo)
config = DEFAULT_CONFIG
if "config.json" in files:
        config_file = repo.get_file_contents("config.json", ref="gh-pages")
try:
repo_config = json.loads(config_file.decoded_content.decode("utf-8"))
config.update(repo_config)
except ValueError:
click.secho("WARNING: Unable to parse config file. Using defaults.", fg="yellow")
return config | Get the config for the repo, merged with the default config. Returns the default config if
no config file is found. |
379,731 | def _generate_footer(notebook_object, notebook_type):
footer_aux = FOOTER
if "Main_Files" in notebook_type:
footer_aux = footer_aux.replace("../MainFiles/", "")
notebook_object["cells"].append(nb.v4.new_markdown_cell(footer_aux,
**{"metadata":
{"tags": ["footer"]}}))
notebook_object["cells"].append(nb.v4.new_markdown_cell(AUX_CODE_MESSAGE,
**{"metadata": {"tags": ["hide_mark"]}}))
notebook_object["cells"].append(nb.v4.new_code_cell(CSS_STYLE_CODE,
**{"metadata": {"tags": ["hide_both"]}})) | Internal function that is used for generation of the notebooks footer.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Load"
- "Record"
- "Visualise"
- "Pre-Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Understand"
- "Evaluate" |
379,732 | def delete_external_link(self, id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
return self.delete_external_link_with_http_info(id, **kwargs)
else:
(data) = self.delete_external_link_with_http_info(id, **kwargs)
return data | Delete a specific external link # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_external_link(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerExternalLink
If the method is called asynchronously,
returns the request thread. |
379,733 | def version(app, appbuilder):
_appbuilder = import_application(app, appbuilder)
click.echo(
click.style(
"F.A.B Version: {0}.".format(_appbuilder.version), bg="blue", fg="white"
)
) | Flask-AppBuilder package version |
379,734 | def listar_por_equip(self, equip_id):
if equip_id is None:
raise InvalidParameterError(
u)
url = + str(equip_id) +
        code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml) | Lists all environments for a specific piece of equipment.
:return: Dictionary with the following structure:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >}}
:raise DataBaseError: Networkapi failed while accessing the database.
:raise XMLError: Networkapi failed while generating the response XML.
379,735 | def stop(self, timeout=1.0):
if timeout:
self._running.wait(timeout)
return self._ioloop_manager.stop(callback=self._uninstall) | Stop a running server (from another thread).
Parameters
----------
timeout : float or None, optional
Seconds to wait for server to have *started*.
Returns
-------
stopped : thread-safe Future
Resolves when the server is stopped |
379,736 | def prepare_for_json_encoding(obj):
obj_type = type(obj)
if obj_type == list or obj_type == tuple:
return [prepare_for_json_encoding(item) for item in obj]
if obj_type == dict:
return OrderedDict(
(prepare_for_json_encoding(k),
prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
)
if obj_type == six.binary_type:
return smart_unicode_decode(obj)
if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
return obj
if obj_type == PSLiteral:
return u"/%s" % obj.name
return six.text_type(obj) | Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null). |
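Based on the branches above, nested containers are normalized recursively and dict keys are sorted; a small illustrative call, assuming the module's six/OrderedDict imports and that `smart_unicode_decode` decodes UTF-8 bytes:

```python
data = ({"b": b"bytes", "a": (1, 2)}, True, None)
print(prepare_for_json_encoding(data))
# -> [OrderedDict([('a', [1, 2]), ('b', 'bytes')]), True, None]
```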
379,737 | def statement_after(self, i):
k = i + 1
o = len(self.body)
n = o + len(self.else_body)
if k > 0:
if k < o:
return self.body.statement(k)
if k > o and k < n:
return self.else_body.statement(k)
if k < 0:
if k < o - n and k > -n:
return self.body.statement(k)
if k > o - n:
return self.else_body.statement(k)
return None | Return the statement after the *i*-th one, or `None`. |
379,738 | def platform_to_tags(platform, interpreter):
    if platform.count('-') >= 3:
        tags = platform.rsplit('-', 3)
else:
tags = [platform, interpreter.identity.impl_ver,
interpreter.identity.abbr_impl, interpreter.identity.abi_tag]
    tags[0] = tags[0].replace('.', '_').replace('-', '_')
return tags | Splits a "platform" like linux_x86_64-36-cp-cp36m into its components.
If a simple platform without hyphens is specified, we will fall back to using
the current interpreter's tags. |
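Using the example platform string from the docstring, the split behaves as sketched below; the separator handling follows the reconstruction above and the `interpreter` argument is never consulted on this path:

```python
# 'linux_x86_64-36-cp-cp36m' contains three separators, so it is split directly.
tags = platform_to_tags('linux_x86_64-36-cp-cp36m', interpreter=None)
assert tags == ['linux_x86_64', '36', 'cp', 'cp36m']
```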
379,739 | def add_async_sender(
self, partition=None, operation=None, send_timeout=60,
keep_alive=30, auto_reconnect=True, loop=None):
target = "amqps://{}{}".format(self.address.hostname, self.address.path)
if operation:
target = target + operation
handler = AsyncSender(
self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive,
auto_reconnect=auto_reconnect, loop=loop)
self.clients.append(handler)
return handler | Add an async sender to the client to send ~azure.eventhub.common.EventData object
to an EventHub.
:param partition: Optionally specify a particular partition to send to.
If omitted, the events will be distributed to available partitions via
round-robin.
:type partition: str
:operation: An optional operation to be appended to the hostname in the target URL.
The value must start with `/` character.
:type operation: str
:param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is
queued. Default value is 60 seconds. If set to 0, there will be no timeout.
:type send_timeout: int
:param keep_alive: The time interval in seconds between pinging the connection to keep it alive during
periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not
be pinged.
:type keep_alive: int
:param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs.
Default value is `True`.
:type auto_reconnect: bool
:rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync |
379,740 | def QA_data_day_resample(day_data, type_='w'):
try:
        day_data = day_data.reset_index().set_index('date', drop=False)
    except:
        day_data = day_data.set_index('date', drop=False)
    CONVERSION = {
        'code': 'first',
        'open': 'first',
        'high': 'max',
        'low': 'min',
        'close': 'last',
        'vol': 'sum',
        'amount': 'sum'
    } if 'vol' in day_data.columns else {
        'code': 'first',
        'open': 'first',
        'high': 'max',
        'low': 'min',
        'close': 'last',
        'volume': 'sum',
        'amount': 'sum'
    }
    return day_data.resample(
        type_,
        closed='right'
    ).apply(CONVERSION).dropna().reset_index().set_index(['date',
                                                          'code']) | Downsample daily bar data to a lower frequency
Arguments:
day_data {[type]} -- [description]
Keyword Arguments:
type_ {str} -- [description] (default: {'w'})
Returns:
[type] -- [description] |
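The same open/high/low/close/volume aggregation can be reproduced with plain pandas; a minimal standalone sketch with illustrative column names, not taken from the row above:

```python
import pandas as pd

df = pd.DataFrame(
    {"open": [1, 2, 3], "high": [2, 3, 4], "low": [0, 1, 2],
     "close": [1.5, 2.5, 3.5], "volume": [10, 20, 30]},
    index=pd.date_range("2021-01-04", periods=3, freq="D"),
)
# Weekly bars: first open, max high, min low, last close, summed volume.
weekly = df.resample("W", closed="right").agg(
    {"open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum"}
)
print(weekly)
```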
379,741 | def switch_state(request):
if request.session.get(SESSION_KEY):
request.session[SESSION_KEY] = False
else:
request.session[SESSION_KEY] = True
return redirect(url) | Switch the default version state in
the session. |
379,742 | def list_repos(remote=False):
mgr = plugins_get_mgr()
if not remote:
repomgr = mgr.get(what=, name=)
repos = repomgr.get_repo_list()
repos.sort()
return repos
else:
raise Exception("Not supported yet") | List repos
Parameters
----------
remote: Flag |
379,743 | def get(context, request, resource=None, uid=None):
if uid and not resource:
return api.get_record(uid)
if api.is_uid(resource):
return api.get_record(resource)
portal_type = api.resource_to_portal_type(resource)
if portal_type is None:
raise APIError(404, "Not Found")
return api.get_batched(portal_type=portal_type, uid=uid, endpoint="senaite.jsonapi.v1.get") | GET |
379,744 | def iter_bases(bases):
sequences = ([list(inspect.getmro(base)) for base in bases] +
[list(bases)])
while True:
sequences = [seq for seq in sequences if seq]
if not sequences:
return
for seq in sequences:
head = seq[0]
tails = [seq for seq in sequences if head in seq[1:]]
if not tails:
break
else:
            raise TypeError(
                'Cannot create a consistent method resolution order (MRO) for bases %s' %
                ', '.join([base.__name__ for base in bases]))
yield head
for seq in sequences:
if seq[0] == head:
del seq[0] | Performs MRO linearization of a set of base classes. Yields
each base class in turn. |
379,745 | def _loop_use_cache(self, helper_function, num, fragment):
self.log([u"Examining fragment %d (cache)...", num])
fragment_info = (fragment.language, fragment.filtered_text)
if self.cache.is_cached(fragment_info):
self.log(u"Fragment cached: retrieving audio data from cache")
file_handler, file_path = self.cache.get(fragment_info)
self.log([u"Reading cached fragment at ...", file_path])
succeeded, data = self._read_audio_data(file_path)
if not succeeded:
self.log_crit(u"An unexpected error occurred while reading cached audio file")
return (False, None)
self.log([u"Reading cached fragment at ... done", file_path])
else:
self.log(u"Fragment not cached: synthesizing and caching")
file_info = gf.tmp_file(suffix=u".cache.wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
file_handler, file_path = file_info
self.log([u"Synthesizing fragment to ...", file_path])
voice_code = self._language_to_voice_code(fragment.language)
self.log(u"Calling helper function")
succeeded, data = helper_function(
text=fragment.filtered_text,
voice_code=voice_code,
output_file_path=file_path,
return_audio_data=True
)
if not succeeded:
self.log_crit(u"An unexpected error occurred in helper_function")
return (False, None)
self.log([u"Synthesizing fragment to ... done", file_path])
duration, sr_nu, enc_nu, samples = data
if duration > 0:
self.log(u"Fragment has > 0 duration, adding it to cache")
self.cache.add(fragment_info, file_info)
self.log(u"Added fragment to cache")
else:
self.log(u"Fragment has zero duration, not adding it to cache")
self.log([u"Closing file handler for cached output file path ", file_path])
gf.close_file_handler(file_handler)
self.log([u"Examining fragment %d (cache)... done", num])
return (True, data) | Synthesize all fragments using the cache |
379,746 | def shortentext(text, minlength, placeholder='...'):
return textwrap.shorten(text, minlength, placeholder=str(placeholder)) | Shorten some text by replacing the last part with a placeholder (such as '...')
:type text: string
:param text: The text to shorten
:type minlength: integer
:param minlength: The minimum length before a shortening will occur
:type placeholder: string
:param placeholder: The text to append after removing protruding text. |
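Since this is a thin wrapper around `textwrap.shorten`, whitespace is collapsed and whole words are dropped until the placeholder fits:

```python
import textwrap

text = "The quick brown fox jumps over the lazy dog"
print(textwrap.shorten(text, 20, placeholder="..."))   # -> 'The quick brown...'
```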
379,747 | def form_query(self, columns, options={}):
    from_cl = 'FROM'
    direct = options.get('direct', self.direct)
if direct:
        if columns != '*':
raise ProgrammingError("Column lists cannot be specified for a direct function call.")
        columns = ''
        from_cl = ''
if len(self.args) >= 1:
        replace = ['%s' for x in range(len(self.args))]
func = "%s(" % self.query_base + ",".join(replace) + ")"
else:
func = "%s()" % self.query_base
return "SELECT %s %s %s" % (columns, from_cl, func) | :param str columns: literal sql string for list of columns
:param dict options: dict supporting a single key "direct" as in the constructor
:return: sql string |
379,748 | def __get_host(node, vm_):
    if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
        ip_address = node.private_ips[0]
        log.info('Salt node data. Private_ip: %s', ip_address)
    else:
        ip_address = node.public_ips[0]
        log.info('Salt node data. Public_ip: %s', ip_address)
if ip_address:
return ip_address
return node.name | Return public IP, private IP, or hostname for the libcloud 'node' object |
379,749 | def shell():
if salt.utils.platform.is_windows():
        env_var = 'COMSPEC'
        default = r'C:\Windows\system32\cmd.exe'
else:
        env_var = 'SHELL'
        default = '/bin/sh'
    return {'shell': os.environ.get(env_var, default)} | Return the default shell to use on this system |
379,750 | def calculate_dependencies():
order = []
for g in toposort(merge_dicts(dependencies, soft_dependencies)):
for t in sorted(g, key=lambda x: (priorities[x], x)):
order.append(t)
return order | Calculate test dependencies
First do a topological sorting based on the dependencies.
Then sort the different dependency groups based on priorities. |
379,751 | def Enumerate():
hid_mgr = iokit.IOHIDManagerCreate(None, None)
if not hid_mgr:
raise errors.OsHidError()
iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
if not device_set_ref:
raise errors.OsHidError()
num = iokit.CFSetGetCount(device_set_ref)
devices = (IO_HID_DEVICE_REF * num)()
iokit.CFSetGetValues(device_set_ref, devices)
descriptors = []
for dev in devices:
d = base.DeviceDescriptor()
d.vendor_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_VENDOR_ID)
d.product_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRODUCT_ID)
d.product_string = GetDeviceStringProperty(dev,
HID_DEVICE_PROPERTY_PRODUCT)
d.usage = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
d.usage_page = GetDeviceIntProperty(
dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE)
d.report_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_REPORT_ID)
d.path = GetDevicePath(dev)
descriptors.append(d.ToPublicDict())
cf.CFRelease(device_set_ref)
cf.CFRelease(hid_mgr)
return descriptors | See base class. |
379,752 | def delete_template(self, temp_id=None, params={}, callback=None, **kwargs):
    url = self.mk_url(*['_search', 'template', temp_id])
self.client.fetch(
        self.mk_req(url, method='DELETE', **kwargs),
callback = callback
) | Delete a search template.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg temp_id: Template ID |
379,753 | def text2lm(text, output_file, vocab_file=None, text2idngram_kwargs={}, idngram2lm_kwargs={}):
if vocab_file:
used_vocab_file = vocab_file
else:
        with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f:
used_vocab_file = f.name
text2vocab(text, used_vocab_file)
    with tempfile.NamedTemporaryFile(suffix='.idngram', delete=False) as f:
idngram_file = f.name
try:
output1 = text2idngram(text, vocab_file=used_vocab_file, output_file=idngram_file, **text2idngram_kwargs)
output2 = idngram2lm(idngram_file, vocab_file=used_vocab_file, output_file=output_file, **idngram2lm_kwargs)
except ConversionError:
output = (None, None)
raise
else:
output = (output1, output2)
finally:
if not vocab_file:
os.remove(used_vocab_file)
os.remove(idngram_file)
    return output | Convenience function to directly convert text (and vocabulary) into a language model. |
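A hypothetical end-to-end call; the file names and toy corpus below are illustrative only, and the sentence markers assume the usual CMU-CLMTK corpus format:

```python
corpus = "<s> hello world </s>\n<s> hello again </s>\n"
# Builds a vocabulary and an id-ngram file in temp storage, then writes corpus.lm.
text2lm(corpus, "corpus.lm")
# Reusing an existing vocabulary file skips the auto-generated one.
text2lm(corpus, "corpus.lm", vocab_file="my.vocab")
```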
379,754 | def oneshot(self, query, **params):
if "exec_mode" in params:
raise TypeError("Cannot specify an exec_mode to oneshot.")
    params['segmentation'] = params.get('segmentation', 'none')
return self.post(search=query,
exec_mode="oneshot",
**params).body | Run a oneshot search and returns a streaming handle to the results.
The ``InputStream`` object streams XML fragments from the server. To
parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
assert rr.is_preview == False
The ``oneshot`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`results`), plus at most two more
if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises ValueError: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional):
- "output_mode": Specifies the output format of the results (XML,
JSON, or CSV).
- "earliest_time": Specifies the earliest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "latest_time": Specifies the latest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "rf": Specifies one or more fields to add to the search.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server. |
379,755 | def on(self, left_speed, right_speed):
(left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)
self.left_motor.speed_sp = int(round(left_speed_native_units))
self.right_motor.speed_sp = int(round(right_speed_native_units))
self.left_motor.run_forever()
self.right_motor.run_forever() | Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
Speeds can be percentages or any SpeedValue implementation. |
379,756 | def get_nni_installation_path():
def try_installation_path_sequentially(*sitepackages):
def _generate_installation_path(sitepackages_path):
python_dir = get_python_dir(sitepackages_path)
            entry_file = os.path.join(python_dir, 'nni', 'main.js')
if os.path.isfile(entry_file):
return python_dir
return None
for sitepackage in sitepackages:
python_dir = _generate_installation_path(sitepackage)
if python_dir:
return python_dir
return None
    if os.getenv('VIRTUAL_ENV'):
        python_dir = os.getenv('VIRTUAL_ENV')
    else:
        python_sitepackage = site.getsitepackages()[0]
        if python_sitepackage.startswith('/usr') or python_sitepackage.startswith('/Library'):
python_dir = try_installation_path_sequentially(site.getusersitepackages(), site.getsitepackages()[0])
else:
python_dir = try_installation_path_sequentially(site.getsitepackages()[0], site.getusersitepackages())
if python_dir:
        entry_file = os.path.join(python_dir, 'nni', 'main.js')
        if os.path.isfile(entry_file):
            return os.path.join(python_dir, 'nni')
    print_error('Fail to find nni under python library')
exit(1) | Find nni lib from the following locations in order
Return nni root directory if it exists |
379,757 | def unlock(self):
if self.queue:
function, argument = self.queue.popleft()
function(argument)
else:
self.locked = False | Unlock a mutex. If the queue is not empty, call the next
function with its argument. |
379,758 | def apply_args(job, inputs, optional_inputs=None):
_apply_args_loop(job, inputs, INPUT_FIELD)
_apply_args_loop(job, optional_inputs, OPTIONAL_FIELD)
return job | This function is error checking before the job gets
updated.
:param job: Must be a valid job
:param inputs: Must be a tuple type
:param optional_inputs: optional for OptionalInputs
:return: job |
379,759 | def get_resource_bin_session(self, proxy):
if not self.supports_resource_bin():
raise errors.Unimplemented()
return sessions.ResourceBinSession(proxy=proxy, runtime=self._runtime) | Gets the session for retrieving resource to bin mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.ResourceBinSession) - a
``ResourceBinSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_resource_bin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_bin()`` is ``true``.* |
379,760 | def _bin_op(instance, opnode, op, other, context, reverse=False):
if reverse:
method_name = protocols.REFLECTED_BIN_OP_METHOD[op]
else:
method_name = protocols.BIN_OP_METHOD[op]
return functools.partial(
_invoke_binop_inference,
instance=instance,
op=op,
opnode=opnode,
other=other,
context=context,
method_name=method_name,
) | Get an inference callable for a normal binary operation.
If *reverse* is True, then the reflected method will be used instead. |
379,761 | def bounded_by_sigmas(self, sigmas=3, square=False):
bounds = self.limits_sigma(sigmas=sigmas, square=square)
return SubspaceBounded(self, bounds) | Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()
:rtype: SubspaceBounded |
379,762 | def get_conn(self, urlparsed=None):
if not urlparsed:
urlparsed = self.dsc_parsed2
    if urlparsed.scheme == 'http':
return HTTPConnection(urlparsed.netloc)
else:
return HTTPSConnection(urlparsed.netloc) | Returns an HTTPConnection based on the urlparse result given or the
default Swift cluster (internal url) urlparse result.
:param urlparsed: The result from urlparse.urlparse or None to use the
default Swift cluster's value |
379,763 | def estimate(s1, s2):
s1bb = s1.get_bounding_box()
s2bb = s2.get_bounding_box()
    total_area = ((s2bb['maxx'] - s2bb['minx'] + 1) *
                  (s2bb['maxy'] - s2bb['miny'] + 1))
total_area = float(total_area)
top_area = 0.0
superscript_area = 0.0
right_area = 0.0
subscript_area = 0.0
bottom_area = 0.0
if s2bb[] > s1bb[] and s2bb[] < s1bb[]:
miny = max(s2bb[], s1bb[])
maxy = s2bb[]
minx = max(s2bb[], s1bb[])
maxx = min(s2bb[], s1bb[])
bottom_area = float((maxx-minx)*(maxy-miny))
if s2bb[] > s1bb[] and s2bb[] > s1bb[]:
miny = max(s2bb[], s1bb[])
maxy = s2bb[]
minx = max(s2bb[], s1bb[])
maxx = s2bb[]
subscript_area = (maxx-minx)*(maxy-miny)
if s2bb[] < s1bb[] and s2bb[] > s1bb[] \
and s2bb[] > s1bb[]:
miny = max(s1bb[], s2bb[])
maxy = min(s1bb[], s2bb[])
minx = max(s1bb[], s2bb[])
maxx = s2bb[]
right_area = (maxx-minx)*(maxy-miny)
if s2bb[] < s1bb[] and s2bb[] > s1bb[]:
miny = s2bb[]
maxy = min(s1bb[], s2bb[])
minx = max(s1bb[], s2bb[])
maxx = s2bb[]
superscript_area = (maxx-minx)*(maxy-miny)
if s2bb[] < s1bb[] and s2bb[] < s1bb[]:
miny = s2bb[]
maxy = min(s1bb[], s2bb[])
minx = max(s1bb[], s2bb[])
maxx = min(s1bb[], s2bb[])
top_area = (maxx-minx)*(maxy-miny)
    return {'bottom': bottom_area/total_area,
            'subscript': subscript_area/total_area,
            'right': right_area/total_area,
            'superscript': superscript_area/total_area,
            'top': top_area/total_area} | Estimate the spatial relationship by
examining the position of the bounding boxes.
Parameters
----------
s1 : HandwrittenData
s2 : HandwrittenData
Returns
-------
dict of probabilities
{'bottom': 0.1,
'subscript': 0.2,
'right': 0.3,
'superscript': 0.3,
'top': 0.1} |
379,764 | def imbalance_metrics(data):
if not data:
return 0
imb = 0
num_classes=float(len(Counter(data)))
for x in Counter(data).values():
p_x = float(x)/len(data)
if p_x > 0:
imb += (p_x - 1/num_classes)*(p_x - 1/num_classes)
worst_case=(num_classes-1)*pow(1/num_classes,2) + pow(1-1/num_classes,2)
return (num_classes,imb/worst_case) | Computes imbalance metric for a given dataset.
Imbalance metric is equal to 0 when a dataset is perfectly balanced (i.e. the number of instances in each class is equal).
:param data : pandas.DataFrame
A dataset in a panda's data frame
:returns int
A value of imbalance metric, where zero means that the dataset is perfectly balanced and the higher the value, the more imbalanced the dataset. |
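A worked example following the formula above: for labels with a 3-to-1 split across two classes, p = (0.75, 0.25), so imb = (0.75-0.5)^2 + (0.25-0.5)^2 = 0.125, the worst case is 0.5, and the score is 0.25 (assuming the row's Counter import):

```python
data = ["a", "a", "a", "b"]          # two classes, 3-to-1 split
num_classes, score = imbalance_metrics(data)
assert (num_classes, score) == (2.0, 0.25)
```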
379,765 | def get_indicator(self, resource):
path = resource.real_path
    if os.name != 'posix' and os.path.isdir(path):
return (os.path.getmtime(path),
len(os.listdir(path)),
os.path.getsize(path))
return (os.path.getmtime(path),
os.path.getsize(path)) | Return the modification time and size of a `Resource`. |
379,766 | def j0(x, context=None):
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_j0,
(BigFloat._implicit_convert(x),),
context,
) | Return the value of the first kind Bessel function of order 0 at x. |
379,767 | async def data(
self, message: Union[str, bytes], timeout: DefaultNumType = _default
) -> SMTPResponse:
await self._ehlo_or_helo_if_needed()
self._raise_error_if_disconnected()
if timeout is _default:
timeout = self.timeout
if isinstance(message, str):
message = message.encode("ascii")
async with self._command_lock:
start_response = await self.execute_command(b"DATA", timeout=timeout)
if start_response.code != SMTPStatus.start_input:
raise SMTPDataError(start_response.code, start_response.message)
try:
await self.protocol.write_message_data(
message, timeout=timeout
)
response = await self.protocol.read_response(
timeout=timeout
)
except SMTPServerDisconnected as exc:
self.close()
raise exc
if response.code != SMTPStatus.completed:
raise SMTPDataError(response.code, response.message)
return response | Send an SMTP DATA command, followed by the message given.
This method transfers the actual email content to the server.
:raises SMTPDataError: on unexpected server response code
:raises SMTPServerDisconnected: connection lost |
379,768 | def cmd_slow_requests(self):
slow_requests = [
line.time_wait_response
for line in self._valid_lines
if line.time_wait_response > 1000
]
return slow_requests | List all requests that took a certain amount of time to be
processed.
.. warning::
By now hardcoded to 1 second (1000 milliseconds), improve the
command line interface to allow to send parameters to each command
or globally. |
379,769 | def objective_fun(theta, hamiltonian=None,
                  quantum_resource=QVMConnection(sync_endpoint='http://localhost:5000')):
if hamiltonian is None:
return 1.0
if isinstance(hamiltonian, PauliSum):
result = estimate_locally_commuting_operator(ucc_circuit(theta), hamiltonian,
1.0E-6, quantum_resource=quantum_resource)
result = result[0][0].real
elif isinstance(hamiltonian, np.ndarray) and isinstance(quantum_resource, QVMConnection):
wf = quantum_resource.wavefunction(ucc_circuit(theta))
wf = wf.amplitudes.reshape((-1, 1))
result = np.conj(wf).T.dot(hamiltonian).dot(wf)[0, 0].real
print(result)
else:
raise TypeError("type of hamiltonian or qvm is unrecognized")
return result | Evaluate the Hamiltonian bny operator averaging
:param theta:
:param hamiltonian:
:return: |
379,770 | def _pos(self, idx):
if idx < 0:
last_len = len(self._lists[-1])
if (-idx) <= last_len:
return len(self._lists) - 1, last_len + idx
idx += self._len
if idx < 0:
raise IndexError()
elif idx >= self._len:
raise IndexError()
if idx < len(self._lists[0]):
return 0, idx
_index = self._index
if not _index:
self._build_index()
pos = 0
child = 1
len_index = len(_index)
while child < len_index:
index_child = _index[child]
if idx < index_child:
pos = child
else:
idx -= index_child
pos = child + 1
child = (pos << 1) + 1
return (pos - self._offset, idx) | Convert an index into a pair (alpha, beta) that can be used to access
the corresponding _lists[alpha][beta] position.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree to a leaf node. Each node has
two children which are easily computable. Given an index, pos, the
left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
When the index is less than the left-child, traversal moves to the
left sub-tree. Otherwise, the index is decremented by the left-child
and traversal moves to the right sub-tree.
At a child node, the indexing pair is computed from the relative
position of the child node as compared with the offset and the remaining
index.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Indexing position 8 involves iterating like so:
1. Starting at the root, position 0, 8 is compared with the left-child
node (5) which it is greater than. When greater the index is
decremented and the position is updated to the right child node.
2. At node 9 with index 3, we again compare the index to the left-child
node with value 4. Because the index is the less than the left-child
node, we simply traverse to the left.
3. At node 4 with index 3, we recognize that we are at a leaf node and
stop iterating.
4. To compute the sublist index, we subtract the offset from the index
of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
simply use the index remaining from iteration. In this case, 3.
The final index pair from our example is (2, 3) which corresponds to
index 8 in the sorted list. |
379,771 | def convert_to_node(instance, xml_node: XmlNode, node_globals: InheritedDict = None)\
-> InstanceNode:
return InstanceNode(instance, xml_node, node_globals) | Wraps passed instance with InstanceNode |
379,772 | def get_items_for_config_file_output(self, source_to_settings,
parsed_namespace):
config_file_items = OrderedDict()
for source, settings in source_to_settings.items():
if source == _COMMAND_LINE_SOURCE_KEY:
                _, existing_command_line_args = settings['']
for action in self._actions:
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys and not action.is_positional_arg and \
already_on_command_line(existing_command_line_args,
action.option_strings):
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
if isinstance(value, bool):
value = str(value).lower()
config_file_items[config_file_keys[0]] = value
elif source == _ENV_VAR_SOURCE_KEY:
for key, (action, value) in settings.items():
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys:
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
config_file_items[config_file_keys[0]] = value
elif source.startswith(_CONFIG_FILE_SOURCE_KEY):
for key, (action, value) in settings.items():
config_file_items[key] = value
elif source == _DEFAULTS_SOURCE_KEY:
for key, (action, value) in settings.items():
config_file_keys = self.get_possible_config_keys(action)
if config_file_keys:
value = getattr(parsed_namespace, action.dest, None)
if value is not None:
config_file_items[config_file_keys[0]] = value
return config_file_items | Converts the given settings back to a dictionary that can be passed
to ConfigFormatParser.serialize(..).
Args:
source_to_settings: the dictionary described in parse_known_args()
parsed_namespace: namespace object created within parse_known_args()
Returns:
an OrderedDict where keys are strings and values are either strings
or lists |
379,773 | def decode_struct_fields(self, ins, fields, obj):
for name, field_data_type in fields:
if name in obj:
try:
v = self.json_compat_obj_decode_helper(field_data_type, obj[name])
setattr(ins, name, v)
except bv.ValidationError as e:
e.add_parent(name)
raise
elif field_data_type.has_default():
setattr(ins, name, field_data_type.get_default()) | Args:
ins: An instance of the class representing the data type being decoded.
The object will have its fields set.
fields: A tuple of (field_name: str, field_validator: Validator)
obj (dict): JSON-compatible dict that is being decoded.
strict (bool): See :func:`json_compat_obj_decode`.
Returns:
None: `ins` has its fields set based on the contents of `obj`. |
379,774 | def get_data_file_attachment(self, identifier, resource_id):
model_run = self.get_object(identifier)
if model_run is None:
return None, None
if not resource_id in model_run.attachments:
return None, None
attachment = model_run.attachments[resource_id]
filename = os.path.join(model_run.attachment_directory, resource_id)
return filename, attachment.mime_type | Get path to attached data file with given resource identifer. If no
data file with given id exists the result will be None.
Raise ValueError if an image archive with the given resource identifier
is attached to the model run instead of a data file.
Parameters
----------
identifier : string
Unique model run identifier
resource_id : string
Unique attachment identifier
Returns
-------
string, string
Path to attached data file on disk and attachments MIME type |
379,775 | def update(ctx, migrate=False):
msg =
if migrate:
msg +=
header(msg)
info()
lrun()
lrun()
info()
lrun()
if migrate:
info()
lrun() | Perform a development update |
379,776 | def regularpage(foldername=None, pagename=None):
if foldername is None and pagename is None:
        raise ExperimentError('page_not_found')
if foldername is None and pagename is not None:
return render_template(pagename)
else:
return render_template(foldername+"/"+pagename) | Route not found by the other routes above. May point to a static template. |
379,777 | def db_putString(self, db_name, key, value):
    warnings.warn('deprecated', DeprecationWarning)
    return (yield from self.rpc_call('db_putString',
[db_name, key, value])) | https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring
DEPRECATED |
379,778 | def find_editor() -> str:
    editor = os.environ.get('EDITOR')
    if not editor:
        if sys.platform[:3] == 'win':
            editor = 'notepad'
        else:
            # Favor command-line editors that are likely to be installed
            for editor in ['vim', 'vi', 'emacs', 'nano', 'pico', 'gedit', 'kate', 'subl', 'geany', 'atom']:
                if which(editor):
                    break
return editor | Find a reasonable editor to use by default for the system that the cmd2 application is running on. |
379,779 | def load_genomic_CDR3_anchor_pos_and_functionality(anchor_pos_file_name):
anchor_pos_and_functionality = {}
    anchor_pos_file = open(anchor_pos_file_name, 'r')
first_line = True
for line in anchor_pos_file:
if first_line:
first_line = False
continue
        split_line = line.split(',')
split_line = [x.strip() for x in split_line]
anchor_pos_and_functionality[split_line[0]] = [int(split_line[1]), split_line[2].strip().strip()]
return anchor_pos_and_functionality | Read anchor position and functionality from file.
Parameters
----------
anchor_pos_file_name : str
File name for the functionality and position of a conserved residue
that defines the CDR3 region for each V or J germline sequence.
Returns
-------
anchor_pos_and_functionality : dict
Residue anchor position and functionality for each gene/allele. |
379,780 | def _process_file(self):
print
    with open(self._rebase_file, 'r') as f:
raw = f.readlines()
names = [line.strip()[3:] for line in raw if line.startswith()]
seqs = [line.strip()[3:] for line in raw if line.startswith()]
if len(names) != len(seqs):
raise Exception(
)
self._enzyme_dict = {}
for name, seq in zip(names, seqs):
if in seq:
pass
        elif '^' in seq:
            top_cut = seq.index('^')
            bottom_cut = len(seq) - top_cut - 1
            site = seq.replace('^', '')
            self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
        elif seq.endswith(')'):
            site, cuts = seq.split('(')
            cuts = cuts.replace(')', '')
            top_cut, bottom_cut = [int(x) + len(site) for x in
                                   cuts.split('/')]
self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
shutil.rmtree(self._tmpdir) | Process rebase file into dict with name and cut site information. |
379,781 | def add_additional_options(cls, parser):
group = OptionGroup(parser, "Target Engine Options",
"These options are not required, but may be "
"provided if a specific "
"BPMN application engine is targeted.")
group.add_option("-e", "--target-engine", dest="target_engine",
help="target the specified BPMN application engine")
group.add_option(
"-t", "--target-version", dest="target_engine_version",
help="target the specified version of the BPMN application engine")
parser.add_option_group(group) | Override in subclass if required. |
379,782 | def read_stat():
data = []
with open("/proc/stat", "rb") as stat_file:
for line in stat_file:
cpu_stat = line.split()
if cpu_stat[0][:3] != b"cpu":
break
if len(cpu_stat[0]) == 3:
continue
data.append(
{
"times": {
"user": int(cpu_stat[1]),
"nice": int(cpu_stat[2]),
"sys": int(cpu_stat[3]),
"idle": int(cpu_stat[4]),
"irq": int(cpu_stat[6]),
}
}
)
return data | Returns the system stat information.
:returns: The system stat information.
:rtype: list |
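Because the per-CPU counters are cumulative jiffies, utilisation must be derived from the difference of two samples. A minimal sketch built on read_stat (the interval and the busy/total split are illustrative; iowait and softirq are ignored because read_stat does not report them):

import time

def cpu_usage(interval=1.0):
    # Sample /proc/stat twice and derive a per-CPU busy fraction from the deltas.
    before = read_stat()
    time.sleep(interval)
    after = read_stat()
    usage = []
    for b, a in zip(before, after):
        busy = sum(a["times"][k] - b["times"][k] for k in ("user", "nice", "sys", "irq"))
        total = busy + (a["times"]["idle"] - b["times"]["idle"])
        usage.append(busy / total if total else 0.0)
    return usage

print(cpu_usage())  # e.g. [0.03, 0.01, ...], one entry per CPU core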
379,783 | def project_closed(self, project):
yield from super().project_closed(project)
hdd_files_to_close = yield from self._find_inaccessible_hdd_files()
for hdd_file in hdd_files_to_close:
log.info("Closing VirtualBox VM disk file {}".format(os.path.basename(hdd_file)))
try:
yield from self.execute("closemedium", ["disk", hdd_file])
except VirtualBoxError as e:
log.warning("Could not close VirtualBox VM disk file {}: {}".format(os.path.basename(hdd_file), e))
continue | Called when a project is closed.
:param project: Project instance |
379,784 | def sha_github_file(cls, config, repo_file, repository_api, repository_branch):
repo_file_sha = None
cfg = config.get_conf()
github_token = cfg['github']['api-token']  # config keys are an assumption; the original literals were stripped from this record
headers = {"Authorization": "token " + github_token}
url_dir = repository_api + "/git/trees/" + repository_branch
logger.debug("Getting sha data from tree: %s", url_dir)
raw_repo_file_info = requests.get(url_dir, headers=headers)
raw_repo_file_info.raise_for_status()
for rfile in raw_repo_file_info.json()['tree']:
    if rfile['path'] == repo_file:
logger.debug("SHA found: %s, ", rfile["sha"])
repo_file_sha = rfile["sha"]
break
return repo_file_sha | Return the GitHub SHA for a file in the repository |
379,785 | def parse_param_signature(sig):
match = PARAM_SIG_RE.match(sig.strip())
if not match:
raise RuntimeError('Parameter signature invalid, got ' + sig)  # message text is a placeholder; original literal stripped
groups = match.groups()
modifiers = groups[0].split()
typ, name, _, default = groups[-4:]
return ParamTuple(name=name, typ=typ,
default=default, modifiers=modifiers) | Parse a parameter signature of the form: type name (= default)? |
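PARAM_SIG_RE and ParamTuple are defined elsewhere in the module and are not part of this record; the definitions below are assumptions, included only to make the usage example self-contained:

import re
from collections import namedtuple

ParamTuple = namedtuple('ParamTuple', ['name', 'typ', 'default', 'modifiers'])
# Assumed pattern: optional modifiers, a type, a name, and an optional '= default'.
PARAM_SIG_RE = re.compile(r'^((?:(?:ref|out|params|this)\s+)*)(\S+)\s+(\S+?)(\s*=\s*(\S+))?$')

print(parse_param_signature('ref int count = 0'))
# -> ParamTuple(name='count', typ='int', default='0', modifiers=['ref'])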
379,786 | def nz(value, none_value, strict=True):
debug = False  # both branches of the original DEBUG check assigned False, so debug output stays disabled
if debug: print("START nz frameworkutilities.py ----------------------\n")
if value is None and strict:
return_val = none_value
elif strict and value is not None:
return_val = value
elif not strict and not is_not_null(value):
return_val = none_value
else:
return_val = value
if debug: print("value: %s | none_value: %s | return_val: %s" %
(value, none_value, return_val))
if debug: print("END nz frameworkutilities.py ----------------------\n")
return return_val | This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello" |
379,787 | def get_imap_capabilities(server):
capabilities = list(map(str, list(server.capabilities())))
for i in range(len(capabilities)):
capabilities[i] = str(capabilities[i]).replace("b'", "").replace("'", "")  # strip the bytes-literal markers, not every 'b'
logger.debug("IMAP server supports: {0}".format(capabilities))
return capabilities | Returns a list of an IMAP server's capabilities
Args:
server (imapclient.IMAPClient): An instance of imapclient.IMAPClient
Returns (list): A list of capabilities |
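A hedged usage sketch with imapclient; the host, credentials, and capability name are illustrative:

from imapclient import IMAPClient

server = IMAPClient('imap.example.com', ssl=True)
server.login('user@example.com', 'password')
caps = get_imap_capabilities(server)
if 'IDLE' in caps:
    print('Server supports IMAP IDLE')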
379,788 | def quantile_for_single_value(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_single_value(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
assert type(q) is float
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._build_mapreduce_func(quantile_builder, **kwargs)
result = self._full_axis_reduce(axis, func)
if axis == 0:
result.index = [q]
else:
result.columns = [q]
return result | Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row. |
379,789 | def install_package(tar_url, folder, md5_url='{tar_url}.md5',  # default template inferred from md5_url.format below
on_download=lambda: None, on_complete=lambda: None):
data_file = join(folder, basename(tar_url))
md5_url = md5_url.format(tar_url=tar_url)
try:
remote_md5 = download(md5_url).decode().split()[0]
except (UnicodeDecodeError, URLError):
raise ValueError('Invalid md5 url: ' + md5_url)  # message text is a placeholder; original literal stripped
if remote_md5 != calc_md5(data_file):
on_download()
if isfile(data_file):
try:
with tarfile.open(data_file) as tar:
for i in reversed(list(tar)):
try:
os.remove(join(folder, i.path))
except OSError:
pass
except (OSError, EOFError):
pass
download_extract_tar(tar_url, folder, data_file)
on_complete()
if remote_md5 != calc_md5(data_file):
raise ValueError('MD5 verification failed for ' + md5_url)  # message text is a placeholder; original literal stripped
return True
return False | Install or update a tar package that has an md5
Args:
tar_url (str): URL of package to download
folder (str): Location to extract tar. Will be created if it doesn't exist
md5_url (str): URL of md5 to use to check for updates
on_download (Callable): Function that gets called when downloading a new update
on_complete (Callable): Function that gets called when a new download is complete
Returns:
bool: Whether the package was updated |
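Usage sketch; the URL and folder are placeholders, and the md5 file is assumed to sit next to the tarball (the '{tar_url}.md5' default inferred above):

updated = install_package(
    'https://example.com/models/en-model.tar.gz',
    '/opt/models/en',
    on_download=lambda: print('Downloading update...'),
    on_complete=lambda: print('Update installed.')
)
if not updated:
    print('Already up to date.')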
379,790 | def get(self, reference, country, target=datetime.date.today()):
reference = self.reference if reference is None else reference
reference_value = self.data.get(reference, country).value
target_value = self.data.get(target, country).value
return self._compute_inflation(target_value, reference_value) | Get the inflation/deflation value change for the target date relative
to the reference date. The target defaults to today; the instance's
reference and country are used if they are not provided as
parameters |
379,791 | def ext(self):
if self._filename:
    return os.path.splitext(self._filename)[1].lstrip('.')
# The extension strings were stripped from this record; the values below are
# reconstructed from the MIME-type constant names and the docstring ('vid' fallback).
return {
    CT.ASF: 'asf',
    CT.AVI: 'avi',
    CT.MOV: 'mov',
    CT.MP4: 'mp4',
    CT.MPG: 'mpg',
    CT.MS_VIDEO: 'avi',
    CT.SWF: 'swf',
    CT.WMV: 'wmv',
    CT.X_MS_VIDEO: 'avi',
}.get(self._mime_type, 'vid') | Return the file extension for this video, e.g. 'mp4'.
The extension is that from the actual filename if known. Otherwise
it is the lowercase canonical extension for the video's MIME type.
'vid' is used if the MIME type is 'video/unknown'. |
379,792 | def is_flapping(self, alert, window=1800, count=2):
pipeline = [
{: {
: alert.environment,
: alert.resource,
: alert.event,
: alert.customer
}},
{: },
{: {
: {: datetime.utcnow() - timedelta(seconds=window)},
:
}},
{: {: , : {: 1}}}
]
responses = self.get_db().alerts.aggregate(pipeline)
for r in responses:
if r['count'] > count:
return True
return False | Return true if alert severity has changed more than X times in Y seconds |
379,793 | def statistical_inefficiency(X, truncate_acf=True):
assert np.ndim(X[0]) == 1, 'Data must be a list of one-dimensional time series'  # assert message added; original literal stripped
N = _maxlength(X)
xflat = np.concatenate(X)
Xmean = np.mean(xflat)
X0 = [x-Xmean for x in X]
x2m = np.mean(xflat ** 2)
corrsum = 0.0
for lag in range(N):
acf = 0.0
n = 0.0
for x in X0:
Nx = len(x)
if (Nx > lag):
acf += np.sum(x[0:Nx-lag] * x[lag:Nx])
n += float(Nx-lag)
acf /= n
if acf <= 0 and truncate_acf:
break
elif lag > 0:
corrsum += acf * (1.0 - (float(lag)/float(N)))
corrtime = 0.5 + corrsum / x2m
return 1.0 / (2 * corrtime) | Estimates the statistical inefficiency from univariate time series X
The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal.
Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are
only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should
be used in order to compute statistical uncertainties. See [2]_ for a review.
The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time

.. math::
    \tau = \frac{1}{2} + \sum_{k=1}^{N} A(k) \left(1 - \frac{k}{N}\right)

where

.. math::
    A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x \rangle_t^2}{\mathrm{var}(x)}

is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple
trajectories.
Parameters
----------
X : float array or list of float arrays
Univariate time series (single or multiple trajectories)
truncate_acf : bool, optional, default=True
When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
random noise
References
----------
.. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971)
.. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation
Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich
NIC Series 10, pp. 423-445, 2002. |
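A small self-check on a synthetic AR(1) series; the closed-form value (1 - phi) / (1 + phi) is asymptotic, so the estimate will fluctuate around it:

import numpy as np

# AR(1) series x_t = phi * x_{t-1} + eps_t; its statistical inefficiency
# tends to (1 - phi) / (1 + phi), about 0.053 for phi = 0.9.
rng = np.random.RandomState(0)
phi, n = 0.9, 10000
noise = rng.randn(n)
x = np.empty(n)
x[0] = noise[0]
for t in range(1, n):
    x[t] = phi * x[t - 1] + noise[t]

print(statistical_inefficiency([x]))  # estimate from the time series
print((1 - phi) / (1 + phi))          # asymptotic reference value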
379,794 | def create_embedded_unclaimed_draft(self, test_mode=False, client_id=None, is_for_embedded_signing=False, requester_email_address=None, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, requesting_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False):
# Body reconstructed from the concatenated key names left in this record; the
# required-field check and parameter dict mirror the keyword arguments above.
self._check_required_fields({
    'client_id': client_id,
    'requester_email_address': requester_email_address,
    'draft_type': draft_type
})
params = {
    'test_mode': test_mode, 'client_id': client_id,
    'requester_email_address': requester_email_address,
    'is_for_embedded_signing': is_for_embedded_signing,
    'files': files, 'file_urls': file_urls, 'draft_type': draft_type,
    'subject': subject, 'message': message,
    'signing_redirect_url': signing_redirect_url,
    'requesting_redirect_url': requesting_redirect_url,
    'signers': signers, 'cc_email_addresses': cc_email_addresses,
    'form_fields_per_document': form_fields_per_document,
    'metadata': metadata, 'use_preexisting_fields': use_preexisting_fields,
    'allow_decline': allow_decline
}
return self._create_unclaimed_draft(**params) | Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
client_id (str): Client id of the app used to create the embedded draft.
is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False.
requester_email_address (str): Email address of the requester.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object |
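A hedged usage sketch with the HelloSign Python SDK; the api_key, client_id, file, and signer values are placeholders, and the claim_url attribute on the returned draft is assumed from the SDK's UnclaimedDraft resource:

from hellosign_sdk import HSClient

client = HSClient(api_key='YOUR_API_KEY')
draft = client.create_embedded_unclaimed_draft(
    test_mode=True,
    client_id='YOUR_APP_CLIENT_ID',
    requester_email_address='requester@example.com',
    files=['nda.pdf'],
    draft_type='request_signature',
    subject='Please sign the NDA',
    message='Thanks in advance!',
    signers=[{'name': 'Alice Example', 'email_address': 'alice@example.com'}]
)
print(draft.claim_url)  # embed this URL in your application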
379,795 | def save_namespace(self, filename):
from spyder_kernels.utils.nsview import get_remote_data
from spyder_kernels.utils.iofuncs import iofunctions
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',  # mode value reconstructed; original literal stripped
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename) | Save namespace into filename |
379,796 | def raise_for_missing_namespace(self, line: str, position: int, namespace: str, name: str) -> None:
if not self.has_namespace(namespace):
raise UndefinedNamespaceWarning(self.get_line_number(), line, position, namespace, name) | Raise an exception if the namespace is not defined. |
379,797 | def write(self, pack_uri, blob):
self._zipf.writestr(pack_uri.membername, blob) | Write *blob* to this zip package with the membername corresponding to
*pack_uri*. |
379,798 | def from_function(cls, function):
module_name = function.__module__
function_name = function.__name__
class_name = ""
function_source_hasher = hashlib.sha1()
try:
source = inspect.getsource(function)
if sys.version_info[0] >= 3:
source = source.encode()
function_source_hasher.update(source)
function_source_hash = function_source_hasher.digest()
except (IOError, OSError, TypeError):
function_source_hash = b""
return cls(module_name, function_name, class_name,
function_source_hash) | Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a Python function. If a function is a class method, it should
not be passed to this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function. |
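A brief sketch of building a descriptor from a plain function; the recorded fields follow the constructor arguments used above:

def greet(name):
    return 'Hello, ' + name

descriptor = FunctionDescriptor.from_function(greet)
# The descriptor records the defining module, the function name 'greet',
# an empty class name, and a SHA-1 hash of the function's source code.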
379,799 | def param_array(self):
if (self.__dict__.get('_param_array_', None) is None) or (self._param_array_.size != self.size):
self._param_array_ = np.empty(self.size, dtype=np.float64)
return self._param_array_ | Array representing the parameters of this class.
There is only one copy of all parameters in memory, two during optimization.
!WARNING!: setting the parameter array MUST always be done in memory:
m.param_array[:] = m_copy.param_array |