Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
---|---|---|
375,700 | def sasets(self) -> 'SASets':
if not self._loaded_macros:
self._loadmacros()
self._loaded_macros = True
return SASets(self) | This method creates a SASets object which you can use to run various analytics.
See the sasets.py module.
:return: sasets object |
375,701 | def configure_config(graph):
ns = Namespace(
subject=Config,
)
convention = ConfigDiscoveryConvention(
graph,
)
convention.configure(ns, retrieve=tuple())
return convention.config_discovery | Configure the health endpoint.
:returns: the current service configuration |
375,702 | def check(text):
err = "strunk_white.composition"
msg = "Try instead of ."
bad_forms = [
["dishonest", ["not honest"]],
["trifling", ["not important"]],
["forgot", ["did not remember"]],
["ignored", ["did not pay (any )?attention to"]],
["distrusted", ["did not have much confidence in"]],
["whether", ["the question as to whether"]],
["no doubt", ["there is no doubt but that"]],
["used for fuel", ["used for fuel purposes"]],
["he", ["he is a man who"]],
["hastily", ["in a hasty manner"]],
["this subject", ["this is a subject that"]],
["Her story is strange.", ["Her story is a strange one."]],
["because", ["the reason why is that"]],
["because / since", ["owing to the fact that"]],
["although / though", ["in spite of the fact that"]],
["remind you / notify you",
["call your attention to the fact that"]],
["I did not know that / I was unaware that",
["I was unaware of the fact that"]],
["his failure", ["the fact that he had not succeeded"]],
["my arrival", ["the fact that i had arrived"]]
]
return preferred_forms_check(text, bad_forms, err, msg) | Suggest the preferred forms. |
375,703 | def get(self, fields=[]):
request = TOPRequest()
if not fields:
shopCat = ShopCat()
fields = shopCat.fields
request['fields'] = fields
self.create(self.execute(request))
return self.shop_cats | taobao.shopcats.list.get Get the shop categories shown on the storefront.
This API returns the buyer-facing browsing/navigation categories on Taobao, which differ from the categories used for seller-side product management in the backend. |
375,704 | def releases(self):
return self._h._get_resources(
resource=(, self.name, ),
obj=Release, app=self
) | The releases for this app. |
375,705 | def colors_no_palette(colors=None, **kwds):
if isinstance(colors, str):
colors = _split_colors(colors)
else:
colors = to_triplets(colors or ())
colors = (color(c) for c in colors or ())
return palette.Palette(colors, **kwds) | Return a Palette but don't take into account Palette names. |
375,706 | def remove(self, ref, cb=None):
if self.is_api:
return self._remove_api(ref, cb)
else:
return self._remove_fs(ref, cb) | Check in a bundle to the remote |
375,707 | def threshold(self, value):
if isinstance(value, SpamThreshold):
self._threshold = value
else:
self._threshold = SpamThreshold(value) | Threshold used to determine if your content qualifies as spam.
On a scale from 1 to 10, with 10 being most strict, or most likely to
be considered as spam.
:param value: Threshold used to determine if your content qualifies as
spam.
On a scale from 1 to 10, with 10 being most strict, or
most likely to be considered as spam.
:type value: int |
375,708 | def eval_adiabatic_limit(YABFGN, Ytilde, P0):
Y, A, B, F, G, N = YABFGN
Klim = (P0 * (B - A * Ytilde * A) * P0).expand().simplify_scalar()
Hlim = ((Klim - Klim.dag())/2/I).expand().simplify_scalar()
Ldlim = (P0 * (G - A * Ytilde * F) * P0).expand().simplify_scalar()
dN = identity_matrix(N.shape[0]) + F.H * Ytilde * F
Nlim = (P0 * N * dN * P0).expand().simplify_scalar()
return SLH(Nlim.dag(), Ldlim.dag(), Hlim.dag()) | Compute the limiting SLH model for the adiabatic approximation
Args:
YABFGN: The tuple (Y, A, B, F, G, N)
as returned by prepare_adiabatic_limit.
Ytilde: The pseudo-inverse of Y, satisfying Y * Ytilde = P0.
P0: The projector onto the null-space of Y.
Returns:
SLH: Limiting SLH model |
375,709 | def _setup_advanced_theme(self, theme_name, output_dir, advanced_name):
output_theme_dir = os.path.join(output_dir, advanced_name)
output_images_dir = os.path.join(output_theme_dir, advanced_name)
input_theme_dir = os.path.join(
utils.get_themes_directory(theme_name, self.png_support), theme_name)
input_images_dir = os.path.join(input_theme_dir, theme_name)
advanced_pkg_dir = os.path.join(utils.get_file_directory(), "advanced")
for directory in [output_dir, output_theme_dir]:
utils.create_directory(directory)
file_name = theme_name + ".tcl"
theme_input = os.path.join(input_theme_dir, file_name)
theme_output = os.path.join(output_theme_dir, "{}.tcl".format(advanced_name))
with open(theme_input, "r") as fi, open(theme_output, "w") as fo:
for line in fi:
line = line.replace(theme_name, advanced_name)
line = line.replace("gif89", "png")
line = line.replace("gif", "png")
fo.write(line)
theme_pkg_input = os.path.join(advanced_pkg_dir, "pkgIndex.tcl")
theme_pkg_output = os.path.join(output_theme_dir, "pkgIndex.tcl")
with open(theme_pkg_input, "r") as fi, open(theme_pkg_output, "w") as fo:
for line in fi:
fo.write(line.replace("advanced", advanced_name))
theme_pkg_input = os.path.join(advanced_pkg_dir, "pkgIndex_package.tcl")
theme_pkg_output = os.path.join(output_dir, "pkgIndex.tcl")
with open(theme_pkg_input, "r") as fi, open(theme_pkg_output, "w") as fo:
for line in fi:
fo.write(line.replace("advanced", advanced_name))
if os.path.exists(output_images_dir):
rmtree(output_images_dir)
copytree(input_images_dir, output_images_dir) | Setup all the files required to enable an advanced theme.
Copies all the files over and creates the required directories
if they do not exist.
:param theme_name: theme to copy the files over from
:param output_dir: output directory to place the files in |
375,710 | def read(self, size=sys.maxsize):
blob_size = int(self.blob_properties.get())
if self._pointer < blob_size:
chunk = self._download_chunk_with_retries(
chunk_offset=self._pointer, chunk_size=size)
self._pointer += size
return chunk | Read at most size bytes from the file (less if the read hits EOF
before obtaining size bytes). |
375,711 | def field_from_django_field(cls, field_name, django_field, readonly):
FieldWidget = cls.widget_from_django_field(django_field)
widget_kwargs = cls.widget_kwargs_for_field(field_name)
field = cls.DEFAULT_RESOURCE_FIELD(
attribute=field_name,
column_name=field_name,
widget=FieldWidget(**widget_kwargs),
readonly=readonly,
default=django_field.default,
)
return field | Returns a Resource Field instance for the given Django model field. |
375,712 | def close_session(self):
if not self._session.closed:
if self._session._connector_owner:
self._session._connector.close()
self._session._connector = None | Close current session. |
375,713 | def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None):
patch_request_body = [{'op': operation, 'path': path, 'value': value}]
return self.patch_request(id_or_uri=id_or_uri,
body=patch_request_body,
timeout=timeout,
custom_headers=custom_headers) | Uses the PATCH to update a resource.
Only one operation can be performed in each PATCH call.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
operation: Patch operation
path: Path
value: Value
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
Updated resource. |
375,714 | def _update_partition_srvc_node_ip(self, tenant_name, srvc_ip,
vrf_prof=None, part_name=None):
self.dcnm_obj.update_project(tenant_name, part_name,
service_node_ip=srvc_ip,
vrf_prof=vrf_prof,
desc="Service Partition") | Function to update srvc_node address of partition. |
375,715 | def is_not_inf(self):
self._validate_number()
self._validate_real()
if math.isinf(self.val):
self._err()
return self | Asserts that val is real number and not Inf (infinity). |
375,716 | def _run_cromwell(args):
main_file, json_file, project_name = _get_main_and_json(args.directory)
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "cromwell_work"))
final_dir = utils.safe_makedir(os.path.join(work_dir, "final"))
if args.no_container:
_remove_bcbiovm_path()
log_file = os.path.join(work_dir, "%s-cromwell.log" % project_name)
metadata_file = os.path.join(work_dir, "%s-metadata.json" % project_name)
option_file = os.path.join(work_dir, "%s-options.json" % project_name)
cromwell_opts = {"final_workflow_outputs_dir": final_dir,
"default_runtime_attributes": {"bootDiskSizeGb": 20}}
with open(option_file, "w") as out_handle:
json.dump(cromwell_opts, out_handle)
cmd = ["cromwell", "-Xms1g", "-Xmx%s" % _estimate_runner_memory(json_file),
"run", "--type", "CWL",
"-Dconfig.file=%s" % hpc.create_cromwell_config(args, work_dir, json_file)]
cmd += hpc.args_to_cromwell_cl(args)
cmd += ["--metadata-output", metadata_file, "--options", option_file,
"--inputs", json_file, main_file]
with utils.chdir(work_dir):
_run_tool(cmd, not args.no_container, work_dir, log_file)
if metadata_file and utils.file_exists(metadata_file):
with open(metadata_file) as in_handle:
metadata = json.load(in_handle)
if metadata["status"] == "Failed":
_cromwell_debug(metadata)
sys.exit(1)
else:
_cromwell_move_outputs(metadata, final_dir) | Run CWL with Cromwell. |
375,717 | def indent(text, n=4):
_indent = ' ' * n
return '\n'.join(_indent + line for line in text.split('\n')) | Indent each line of text by n spaces |
375,718 | def add(self, event, subscriber, append=True):
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber) | Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event. |
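A minimal, self-contained sketch of the append/prepend ordering described above; the `_Subscribers` container and the event/handler names are hypothetical stand-ins, not the library's API:

```python
from collections import deque

# Hypothetical container reproducing just the add() logic shown above.
class _Subscribers:
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        elif append:
            subs[event].append(subscriber)
        else:
            subs[event].appendleft(subscriber)

bus = _Subscribers()
bus.add('saved', 'handler_a')                # first subscriber for 'saved'
bus.add('saved', 'handler_b')                # appended after handler_a
bus.add('saved', 'handler_c', append=False)  # prepended, notified first
print(list(bus._subscribers['saved']))       # ['handler_c', 'handler_a', 'handler_b']
```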
375,719 | def make_doc(self):
res = {}
for column in self.columns:
if isinstance(column[], ColumnProperty):
key = column[]
label = column[].columns[0].info.get(
, {}
).get()
if label is None:
continue
res[key] = label
elif isinstance(column[], RelationshipProperty):
key = column[]
label = column[].info.get(
, {}
).get()
if label is None:
continue
if column[].uselist:
subres = column[].make_doc()
for subkey, value in subres.items():
new_key = u"%s.first.%s" % (key, subkey)
res[new_key] = u"%s - %s (premier élément)" % (
label, value
)
new_key = u"%s.last.%s" % (key, subkey)
res[new_key] = u"%s - %s (dernier élément)" % (
label, value
)
else:
subres = column[].make_doc()
for subkey, value in subres.items():
new_key = u"%s.%s" % (key, subkey)
res[new_key] = u"%s - %s" % (label, value)
print("------------------ Rendering the docs -------------------")
keys = res.keys()
keys.sort()
for key in keys:
value = res[key]
print(u"{0} : py3o.{1}".format(value, key))
return res | Generate the doc for the current context in the form
{'key': 'label'} |
375,720 | def add_text(self, text, cursor=None, justification=None):
if cursor is None:
cursor = self.page.cursor
text = re.sub(r"\s\s+", " ", text)
if justification is None:
justification = self.justification
if "\n" in text:
text_list = text.split("\n")
for text in text_list:
PDFText(self.session, self.page, text, self.font, self.text_color, cursor, justification, self.double_spacing)
self.add_newline()
else:
PDFText(self.session, self.page, text, self.font, self.text_color, cursor, justification, self.double_spacing) | Input text, short or long. Writes in order, within the defined page boundaries. Sequential add_text commands will print without
additional whitespace. |
375,721 | def _get_config():
cfg = os.path.expanduser()
try:
fic = open(cfg)
try:
config = json.loads(fic.read())
finally:
fic.close()
except Exception:
config = {: }
if not in config:
config[] = {}
return config | Get user docker configuration
Return: dict |
375,722 | def main_generate(table_names, stream):
with stream.open() as fp:
fp.write_line("from datetime import datetime, date")
fp.write_line("from decimal import Decimal")
fp.write_line("from prom import Orm, Field")
fp.write_newlines()
for table_name, inter, fields in get_table_info(*table_names):
fp.write_line("class {}(Orm):".format(table_name.title().replace("_", "")))
fp.write_line(" table_name = ".format(table_name))
if inter.connection_config.name:
fp.write_line(" connection_name = ".format(inter.connection_config.name))
fp.write_newlines()
magic_field_names = set(["_id", "_created", "_updated"])
if "_id" in fields:
fp.write_line(get_field_def("_id", fields.pop("_id")))
magic_field_names.discard("_id")
for field_name, field_d in fields.items():
fp.write_line(get_field_def(field_name, field_d))
for magic_field_name in magic_field_names:
if magic_field_name not in fields:
fp.write_line(" {} = None".format(magic_field_name))
fp.write_newlines(2) | This will print out valid prom python code for given tables that already exist
in a database.
This is really handy when you want to bootstrap an existing database to work
with prom and don't want to manually create Orm objects for the tables you want
to use, let `generate` do it for you |
375,723 | def getpath(self, section, option):
return os.path.expanduser(os.path.expandvars(self.get(section, option))) | Return option as an expanded path. |
375,724 | def _prepare_output(partitions, verbose):
out = {}
partitions_count = len(partitions)
out[] = {
: partitions_count,
}
if partitions_count == 0:
out[] =
else:
out[] = "{count} offline partitions.".format(count=partitions_count)
if verbose:
lines = (
.format(topic, partition)
for (topic, partition) in partitions
)
out[] = "Partitions:\n" + "\n".join(lines)
else:
cmdline = sys.argv[:]
cmdline.insert(1, )
out[] += + .join(cmdline)
if verbose:
out[][] = [
{: topic, : partition}
for (topic, partition) in partitions
]
return out | Returns dict with 'raw' and 'message' keys filled. |
375,725 | def remove_description(self, id, **kwargs):
kwargs[] = True
if kwargs.get():
return self.remove_description_with_http_info(id, **kwargs)
else:
(data) = self.remove_description_with_http_info(id, **kwargs)
return data | Remove description from a specific source # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.remove_description(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread. |
375,726 | def up(self, migration_id=None, fake=False):
if not self.check_directory():
return
for migration in self.get_migrations_to_up(migration_id):
logger.info( % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if not fake:
if hasattr(migration_module, 'up'):
migration_module.up(self.db)
else:
logger.error( % migration.filename)
record = migration.as_dict()
record[] = datetime.utcnow()
self.collection.insert(record) | Executes migrations. |
375,727 | def increase_fcp_usage(self, fcp, assigner_id=None):
connections = self.db.get_connections_from_assigner(assigner_id)
new = False
if connections == 0:
self.db.assign(fcp, assigner_id)
new = True
else:
self.db.increase_usage(fcp)
return new | Increase fcp usage of given fcp
Returns True if it's a new fcp, otherwise returns False |
375,728 | def fingers_needed(fingering):
split = False
indexfinger = False
minimum = min(finger for finger in fingering if finger)
result = 0
for finger in reversed(fingering):
if finger == 0:
split = True
else:
if not split and finger == minimum:
result += 1
indexfinger = True
else:
result += 1
return result | Return the number of fingers needed to play the given fingering. |
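A quick worked example of the function above; the fret list is assumed to run from low string to high string, and the chord shape is purely illustrative:

```python
# Open C major shape (E A D G B e): frets 0 3 2 0 1 0.
# The open strings split the shape, so each fretted note gets its own finger.
print(fingers_needed((0, 3, 2, 0, 1, 0)))  # 3
```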
375,729 | def load_additional_data(self, valid_data, many, original_data):
if many:
for i, _ in enumerate(valid_data):
additional_keys = set(original_data[i]) - set(valid_data[i])
for key in additional_keys:
valid_data[i][key] = original_data[i][key]
else:
additional_keys = set(original_data) - set(valid_data)
for key in additional_keys:
valid_data[key] = original_data[key]
return valid_data | Include unknown fields after load.
Unknown fields are added with no processing at all.
Args:
valid_data (dict or list): validated data returned by ``load()``.
many (bool): if True, data and original_data are a list.
original_data (dict or list): data passed to ``load()`` in the
first place.
Returns:
dict: the same ``valid_data`` extended with the unknown attributes.
Inspired by https://github.com/marshmallow-code/marshmallow/pull/595. |
375,730 | def _salt_send_domain_event(opaque, conn, domain, event, event_data):
data = {
'domain': {
'name': domain.name(),
'id': domain.ID(),
'uuid': domain.UUIDString()
},
'event': event
}
data.update(event_data)
_salt_send_event(opaque, conn, data) | Helper function send a salt event for a libvirt domain.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param domain: name of the domain related to the event
:param event: name of the event
:param event_data: additional event data dict to send |
375,731 | def wait(self):
self._done_event.wait(MAXINT)
return self._status, self._exception | wait for the done event to be set - no timeout |
375,732 | def controlled(self, control_qubit):
control_qubit = unpack_qubit(control_qubit)
self.modifiers.insert(0, "CONTROLLED")
self.qubits.insert(0, control_qubit)
return self | Add the CONTROLLED modifier to the gate with the given control qubit. |
375,733 | def OnOpen(self, event):
if undo.stack().haschanged():
save_choice = self.interfaces.get_save_request_from_user()
if save_choice is None:
return
elif save_choice:
post_command_event(self.main_window, self.main_window.SaveMsg)
f2w = get_filetypes2wildcards(
["pys", "pysu", "xls", "xlsx", "ods", "all"])
filetypes = f2w.keys()
wildcards = f2w.values()
wildcard = "|".join(wildcards)
message = _("Choose file to open.")
style = wx.OPEN
default_filetype = config["default_open_filetype"]
try:
default_filterindex = filetypes.index(default_filetype)
except ValueError:
default_filterindex = 0
get_fp_fidx = self.interfaces.get_filepath_findex_from_user
filepath, filterindex = get_fp_fidx(wildcard, message, style,
filterindex=default_filterindex)
if filepath is None:
return
filetype = filetypes[filterindex]
self.main_window.filepath = filepath
post_command_event(self.main_window,
self.main_window.GridActionOpenMsg,
attr={"filepath": filepath, "filetype": filetype})
title_text = filepath.split("/")[-1] + " - pyspread"
post_command_event(self.main_window,
self.main_window.TitleMsg, text=title_text)
self.main_window.grid.ForceRefresh()
if is_gtk():
try:
wx.Yield()
except:
pass
undo.stack().clear()
undo.stack().savepoint()
try:
post_command_event(self.main_window, self.ContentChangedMsg)
except TypeError:
pass | File open event handler |
375,734 | def parse(self):
delimiter = "|"
csv_file = self.xlsx_to_csv(self.getInputFile(), delimiter=delimiter)
reader = csv.DictReader(csv_file, delimiter=delimiter)
for n, row in enumerate(reader):
resid = row.get("SampleID", None)
serial = row.get("SerialNumber", None)
value = row.get("Value", None) or "Invalid"
if not any([resid, serial]):
self.err("Result identification not found.", numline=n)
continue
rawdict = row
rawdict["Value"] = value.rstrip(" cps/ml")
rawdict[] =
if in rawdict.get(, ):
rawdict[] = 1
else:
rawdict[] = 1.82
key = resid or serial
testname = row.get("Product", "EasyQDirector")
self._addRawResult(key, {testname: rawdict}, False) | parse the data |
375,735 | def fromvars(cls, dataset, batch_size, train=None, **kwargs):
batch = cls()
batch.batch_size = batch_size
batch.dataset = dataset
batch.fields = dataset.fields.keys()
for k, v in kwargs.items():
setattr(batch, k, v)
return batch | Create a Batch directly from a number of Variables. |
375,736 | def flash_spi_attach(self, hspi_arg):
arg = struct.pack('<I', hspi_arg)
if not self.IS_STUB:
self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg) | Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command. |
375,737 | def key_value_contents(use_dict=None, as_class=dict, key_values=()):
if _debug: key_value_contents._debug("key_value_contents use_dict=%r as_class=%r key_values=%r", use_dict, as_class, key_values)
if use_dict is None:
use_dict = as_class()
for k, v in key_values:
if v is not None:
if hasattr(v, 'dict_contents'):
v = v.dict_contents(as_class=as_class)
use_dict.__setitem__(k, v)
return use_dict | Return the contents of an object as a dict. |
375,738 | def set_device_id(self, dev, id):
if id < 0 or id > 255:
raise ValueError("ID must be an unsigned byte!")
com, code, ok = io.send_packet(
CMDTYPE.SETID, 1, dev, self.baudrate, 5, id)
if not ok:
raise_error(code) | Set device ID to new value.
:param str dev: Serial device address/path
:param id: Device ID to set |
375,739 | def has(self, block, name):
try:
return self._kvs.has(self._key(block, name))
except KeyError:
return False | Return whether or not the field named `name` has a non-default value |
375,740 | def __decode_dictionary(self, message_type, dictionary):
message = message_type()
for key, value in six.iteritems(dictionary):
if value is None:
try:
message.reset(key)
except AttributeError:
pass
continue
try:
field = message.field_by_name(key)
except KeyError:
variant = self.__find_variant(value)
if variant:
message.set_unrecognized_field(key, value, variant)
continue
if field.repeated:
if not isinstance(value, list):
value = [value]
valid_value = [self.decode_field(field, item)
for item in value]
setattr(message, field.name, valid_value)
continue
if value == []:
continue
try:
setattr(message, field.name, self.decode_field(field, value))
except messages.DecodeError:
if not isinstance(field, messages.EnumField):
raise
variant = self.__find_variant(value)
if variant:
message.set_unrecognized_field(key, value, variant)
return message | Merge dictionary in to message.
Args:
message: Message to merge dictionary in to.
dictionary: Dictionary to extract information from. Dictionary
is as parsed from JSON. Nested objects will also be dictionaries. |
375,741 | def reset(self):
self.quit()
self.driver_args[] = self.default_service_args
self.dcap = dict(webdriver.DesiredCapabilities.PHANTOMJS)
self._create_session() | Kills old session and creates a new one with no proxies or headers |
375,742 | def make_2d(array, verbose=True):
array = np.asarray(array)
if array.ndim < 2:
msg = \
.format(array.ndim)
if verbose:
warnings.warn(msg)
array = np.atleast_1d(array)[:,None]
return array | tiny tool to expand 1D arrays the way i want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array with ndim = 2 |
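For example, a plain 1D input is expanded to a column vector while 2D input passes through unchanged:

```python
import numpy as np

print(make_2d([1, 2, 3], verbose=False).shape)  # (3, 1)
print(make_2d(np.ones((2, 2))).shape)           # (2, 2), returned as-is
```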
375,743 | def propmerge(into, data_from):
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == 'enum':
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == 'type':
if spval != new_sp[subprop]:
raise TypeError("Type cannot conflict in allOf")
elif subprop in ('minLength', 'minimum'):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] > spval else spval
elif subprop in ('maxLength', 'maximum'):
new_sp[subprop] = new_sp[subprop] if new_sp[subprop] < spval else spval
elif subprop == 'multipleOf':
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError(
"Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops | Merge JSON schema requirements into a dictionary |
375,744 | def _read_from_cwlinput(in_file, work_dir, runtime, parallel, input_order, output_cwl_keys):
with open(in_file) as in_handle:
inputs = json.load(in_handle)
items_by_key = {}
input_files = []
passed_keys = set([])
for key, input_val in ((k, v) for (k, v) in inputs.items() if not k.startswith(("sentinel", "ignore"))):
if key.endswith("_toolinput"):
key = key.replace("_toolinput", "")
if input_order[key] == "record":
cur_keys, items = _read_cwl_record(input_val)
passed_keys |= cur_keys
items_by_key[key] = items
else:
items_by_key[tuple(key.split("__"))] = _cwlvar_to_wdl(input_val)
input_files = _find_input_files(input_val, input_files)
prepped = _merge_cwlinputs(items_by_key, input_order, parallel)
out = []
for data in prepped:
if isinstance(data, (list, tuple)):
out.append([_finalize_cwl_in(utils.to_single_data(x), work_dir, list(passed_keys),
output_cwl_keys, runtime) for x in data])
else:
out.append(_finalize_cwl_in(data, work_dir, list(passed_keys), output_cwl_keys, runtime))
return out, input_files | Read data records from a JSON dump of inputs. Avoids command line flattening of records. |
375,745 | def searchForMessageIDs(self, query, offset=0, limit=5, thread_id=None):
thread_id, thread_type = self._getThread(thread_id, None)
data = {
"query": query,
"snippetOffset": offset,
"snippetLimit": limit,
"identifier": "thread_fbid",
"thread_fbid": thread_id,
}
j = self._post(
self.req_url.SEARCH_MESSAGES, data, fix_request=True, as_json=True
)
result = j["payload"]["search_snippets"][query]
snippets = result[thread_id]["snippets"] if result.get(thread_id) else []
for snippet in snippets:
yield snippet["message_id"] | Find and get message IDs by query
:param query: Text to search for
:param offset: Number of messages to skip
:param limit: Max. number of messages to retrieve
:param thread_id: User/Group ID to search in. See :ref:`intro_threads`
:type offset: int
:type limit: int
:return: Found Message IDs
:rtype: generator
:raises: FBchatException if request failed |
375,746 | def reset_selective(self, regex=None):
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError()
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var] | Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces. |
375,747 | def blacklist_token():
req = flask.request.get_json(force=True)
data = guard.extract_jwt_token(req[])
blacklist.add(data[])
return flask.jsonify(message=.format(req[])) | Blacklists an existing JWT by registering its jti claim in the blacklist.
.. example::
$ curl http://localhost:5000/blacklist_token -X POST \
-d '{"token":"<your_token>"}' |
375,748 | def predict(self, a, b):
a = np.array(a).reshape((-1, 1))
b = np.array(b).reshape((-1, 1))
return (mutual_info_regression(a, b.reshape((-1,))) + mutual_info_regression(b, a.reshape((-1,))))/2 | Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic |
375,749 | def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
buffers=None, subheader=None, track=False, header=None):
if not isinstance(stream, (zmq.Socket, ZMQStream)):
raise TypeError("stream must be Socket or ZMQStream, not %r"%type(stream))
elif track and isinstance(stream, ZMQStream):
raise TypeError("ZMQStream cannot track messages")
if isinstance(msg_or_type, (Message, dict)):
return msg | Build and send a message via stream or socket.
The message format used by this function internally is as follows:
[ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
buffer1,buffer2,...]
The serialize/unserialize methods convert the nested message dict into this
format.
Parameters
----------
stream : zmq.Socket or ZMQStream
The socket-like object used to send the data.
msg_or_type : str or Message/dict
Normally, msg_or_type will be a msg_type unless a message is being
sent more than once. If a header is supplied, this can be set to
None and the msg_type will be pulled from the header.
content : dict or None
The content of the message (ignored if msg_or_type is a message).
header : dict or None
The header dict for the message (ignored if msg_or_type is a message).
parent : Message or dict or None
The parent or parent header describing the parent of this message
(ignored if msg_or_type is a message).
ident : bytes or list of bytes
The zmq.IDENTITY routing path.
subheader : dict or None
Extra header keys for this message's header (ignored if msg_or_type
is a message).
buffers : list or None
The already-serialized buffers to be appended to the message.
track : bool
Whether to track. Only for use with Sockets, because ZMQStream
objects cannot track messages.
Returns
-------
msg : dict
The constructed message.
(msg,tracker) : (dict, MessageTracker)
if track=True, then a 2-tuple will be returned,
the first element being the constructed
message, and the second being the MessageTracker |
375,750 | def solve_potts_approx(y, w, gamma=None, min_size=1, **kw):
n = len(y)
if n == 0:
return [], [], []
mu_dist = kw.get('mu_dist')
if mu_dist is None:
mu_dist = get_mu_dist(y, w)
kw['mu_dist'] = mu_dist
if gamma is None:
mu, dist = mu_dist.mu, mu_dist.dist
gamma = 3 * dist(0,n-1) * math.log(n) / n
if min_size < 10:
max_size = 20
else:
max_size = min_size + 50
right, values, dists = solve_potts(y, w, gamma, min_size=min_size, max_size=max_size, **kw)
return merge_pieces(gamma, right, values, dists, mu_dist, max_size=max_size) | Fit penalized stepwise constant function (Potts model) to data
approximatively, in linear time.
Do this by running the exact solver using a small maximum interval
size, and then combining consecutive intervals together if it
decreases the cost function. |
375,751 | def inside_polygon(x, y, coordinates):
contained = False
i = -1
y1 = coordinates[1][-1]
y_gt_y1 = y > y1
for y2 in coordinates[1]:
y_gt_y2 = y > y2
if y_gt_y1:
if not y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
x1GEx = x <= x1
x2GEx = x <= x2
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) <= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
else:
if y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
x1GEx = x <= x1
x2GEx = x <= x2
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) >= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
y1 = y2
y_gt_y1 = y_gt_y2
i += 1
return contained | Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
-> use int64 when comparing slopes! |
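A hedged usage sketch, assuming `inside_polygon` is importable and using the integer-scaled representation (coordinates multiplied by 10^7) described above; the square is purely illustrative:

```python
import numpy as np

# Unit square (0,0)-(1,1) scaled by 10**7, given as [[x...], [y...]] int64 arrays.
square = [np.array([0, 10**7, 10**7, 0], dtype=np.int64),
          np.array([0, 0, 10**7, 10**7], dtype=np.int64)]

print(inside_polygon(5 * 10**6, 5 * 10**6, square))  # True  (centre of the square)
print(inside_polygon(2 * 10**7, 5 * 10**6, square))  # False (outside to the right)
```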
375,752 | def set_custom_serializer(self, _type, serializer):
validate_type(_type)
validate_serializer(serializer, StreamSerializer)
self._custom_serializers[_type] = serializer | Assign a serializer for the type.
:param _type: (Type), the target type of the serializer
:param serializer: (Serializer), Custom Serializer constructor function |
375,753 | def get_time(self) -> float:
if self.pause_time is not None:
curr_time = self.pause_time - self.offset - self.start_time
return curr_time
curr_time = time.time()
return curr_time - self.start_time - self.offset | Get the current time in seconds
Returns:
The current time in seconds |
375,754 | def memory_zones(self):
count = self.num_memory_zones()
if count == 0:
return list()
buf = (structs.JLinkMemoryZone * count)()
res = self._dll.JLINK_GetMemZones(buf, count)
if res < 0:
raise errors.JLinkException(res)
return list(buf) | Gets all memory zones supported by the current target.
Some targets support multiple memory zones. This function provides the
ability to get a list of all the memory zones to facilitate using the
memory zone routing functions.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of all the memory zones as ``JLinkMemoryZone`` structures.
Raises:
JLinkException: on hardware errors. |
375,755 | def get_messages(self):
uri = .format(self.data["uri"])
return self._helper.do_get(uri) | Retrieves the error or status messages associated with the specified profile.
Returns:
dict: Server Profile Health. |
375,756 | def set_children(self, value, defined):
self.children = value
self.children_defined = defined
return self | Set the children of the object. |
375,757 | def get_bibliography(lsst_bib_names=None, bibtex=None):
bibtex_data = get_lsst_bibtex(bibtex_filenames=lsst_bib_names)
pybtex_data = [pybtex.database.parse_string(_bibtex, 'bibtex')
for _bibtex in bibtex_data.values()]
if bibtex is not None:
pybtex_data.append(pybtex.database.parse_string(bibtex, 'bibtex'))
bib = pybtex_data[0]
if len(pybtex_data) > 1:
for other_bib in pybtex_data[1:]:
for key, entry in other_bib.entries.items():
bib.add_entry(key, entry)
return bib | Make a pybtex BibliographyData instance from standard lsst-texmf
bibliography files and user-supplied bibtex content.
Parameters
----------
lsst_bib_names : sequence of `str`, optional
Names of lsst-texmf BibTeX files to include. For example:
.. code-block:: python
['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']
Default is `None`, which includes all lsst-texmf bibtex files.
bibtex : `str`
BibTeX source content not included in lsst-texmf. This can be content
from a import ``local.bib`` file.
Returns
-------
bibliography : `pybtex.database.BibliographyData`
A pybtex bibliography database that includes all given sources:
lsst-texmf bibliographies and ``bibtex``. |
375,758 | def splay(vec):
N2 = 2 ** int(numpy.log2( len(vec) ) / 2)
N1 = len(vec) / N2
return N1, N2 | Determine two lengths to split stride the input vector by |
375,759 | def extract_notification_payload(process_output):
data = []
for element in process_output.splitlines()[1:]:
parts = element.split(": ")
if len(parts) == 2:
data.append(parts[1])
return data | Processes the raw output from Gatttool stripping the first line and the
'Notification handle = 0x000e value: ' from each line
@param: process_output - the raw output from a listen command of GattTool
which may look like this:
Characteristic value was written successfully
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 33 00
Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 31 00
Notification handle = 0x000e value: 54 3d 32 37 2e 31 20 48 3d 32 37 2e 34 00
This method strips the first line and strips the 'Notification handle = 0x000e value: ' from each line
@returns a list of the extracted value strings. |
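Feeding it the sample gatttool output quoted in the docstring gives the stripped values:

```python
raw = (
    "Characteristic value was written successfully\n"
    "Notification handle = 0x000e value: 54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00\n"
    "Notification handle = 0x000e value: 54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00\n"
)
print(extract_notification_payload(raw))
# ['54 3d 32 37 2e 33 20 48 3d 32 37 2e 30 00',
#  '54 3d 32 37 2e 32 20 48 3d 32 37 2e 32 00']
```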
375,760 | def fill_subparser(subparser):
sets = ['train', 'valid', 'test']
urls = ['http://www.cs.toronto.edu/~larocheh/public/datasets/' +
        'binarized_mnist/binarized_mnist_{}.amat'.format(s) for s in sets]
filenames = ['binarized_mnist_{}.amat'.format(s) for s in sets]
subparser.set_defaults(urls=urls, filenames=filenames)
return default_downloader | Sets up a subparser to download the binarized MNIST dataset files.
The binarized MNIST dataset files
(`binarized_mnist_{train,valid,test}.amat`) are downloaded from
Hugo Larochelle's website [HUGO].
.. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
binarized_mnist/binarized_mnist_{train,valid,test}.amat
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `binarized_mnist` command. |
375,761 | def _CheckPacketSize(cursor):
cur_packet_size = int(_ReadVariable("max_allowed_packet", cursor))
if cur_packet_size < MAX_PACKET_SIZE:
raise Error(
"MySQL max_allowed_packet of {0} is required, got {1}. "
"Please set max_allowed_packet={0} in your MySQL config.".format(
MAX_PACKET_SIZE, cur_packet_size)) | Checks that MySQL packet size is big enough for expected query size. |
375,762 | def get_media_list_by_selector(
self, media_selector, media_attribute="src"
):
page_url = urlparse.urlparse(self.uri)
return [
mediafile.get_instance(
urlparse.urljoin(
"%s://%s" % (
page_url.scheme,
page_url.netloc
),
urlparse.urlparse(
media.attrib[media_attribute],
scheme="http"
).geturl()
)
)
for media in self.parsedpage.get_nodes_by_selector(media_selector)
] | Return a list of media. |
375,763 | def _expected_condition_find_first_element(self, elements):
from toolium.pageelements.page_element import PageElement
element_found = None
for element in elements:
try:
if isinstance(element, PageElement):
element._web_element = None
element._find_web_element()
else:
self.driver_wrapper.driver.find_element(*element)
element_found = element
break
except (NoSuchElementException, TypeError):
pass
return element_found | Try to find sequentially the elements of the list and return the first element found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:returns: first element found or None
:rtype: toolium.pageelements.PageElement or tuple |
375,764 | def decrypt(private, ciphertext, output):
privatekeydata = json.load(private)
assert in privatekeydata
pub = load_public_key(privatekeydata[])
log("Loading private key")
private_key_error = "Invalid private key"
assert in privatekeydata, private_key_error
assert "decrypt" in privatekeydata[], private_key_error
assert in privatekeydata, private_key_error
assert in privatekeydata, private_key_error
assert privatekeydata[] == , private_key_error
_p = phe.util.base64_to_int(privatekeydata[])
_q = phe.util.base64_to_int(privatekeydata[])
private_key = phe.PaillierPrivateKey(pub, _p, _q)
log("Decrypting ciphertext")
enc = load_encrypted_number(ciphertext, pub)
out = private_key.decrypt(enc)
print(out, file=output) | Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key. |
375,765 | async def fetch_message(self, id):
channel = await self._get_channel()
data = await self._state.http.get_message(channel.id, id)
return self._state.create_message(channel=channel, data=data) | |coro|
Retrieves a single :class:`.Message` from the destination.
This can only be used by bot accounts.
Parameters
------------
id: :class:`int`
The message ID to look for.
Raises
--------
:exc:`.NotFound`
The specified message was not found.
:exc:`.Forbidden`
You do not have the permissions required to get a message.
:exc:`.HTTPException`
Retrieving the message failed.
Returns
--------
:class:`.Message`
The message asked for. |
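A hedged usage sketch for the coroutine above, assuming a discord.py-style Messageable named `channel` and a message ID supplied by the caller:

```python
import discord

async def show_message(channel, message_id: int):
    try:
        message = await channel.fetch_message(message_id)
        print(message.content)
    except discord.NotFound:
        print("No message with that ID in this destination")
```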
375,766 | def _is_gitted(self):
from os import waitpid
from subprocess import Popen, PIPE
premote = Popen("cd {}; git remote -v".format(self.repodir),
shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
waitpid(premote.pid, 0)
remote = premote.stdout.readlines()
remerr = premote.stderr.readlines()
pbranch = Popen("cd {}; git branch".format(self.repodir),
shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
waitpid(pbranch.pid, 0)
branch = pbranch.stdout.readlines()
braerr = pbranch.stderr.readlines()
if len(remote) > 0 and len(remerr) > 0 and len(branch) > 0:
return ((".git" in remote[0] and "fatal" not in remerr[0])
and any(["testing" in b for b in branch]))
elif self.testmode and len(remote) == 0 and len(branch) == 0 and len(remerr) == 0:
return True
else:
return False | Returns true if the current repodir has been initialized in git *and*
had a remote origin added *and* has a 'testing' branch. |
375,767 | def select(self, selections):
if 'atoms' in selections:
self.selection_state['atoms'] = selections['atoms']
self.on_atom_selection_changed()
if 'bonds' in selections:
self.selection_state['bonds'] = selections['bonds']
self.on_bond_selection_changed()
if in selections:
self.selection_state[] = selections[]
return self.selection_state | Make a selection in this
representation. BallAndStickRenderer support selections of
atoms and bonds.
To select the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.select({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection |
375,768 | def _deserialize_class(cls, input_cls_name, trusted, strict):
if not input_cls_name or input_cls_name == cls.__name__:
return cls
if trusted and input_cls_name in cls._REGISTRY:
return cls._REGISTRY[input_cls_name]
if strict:
raise ValueError(
.format(input_cls_name, cls.__name__)
)
return cls | Returns the HasProperties class to use for deserialization |
375,769 | def get_version(dunder_file):
path = abspath(expanduser(dirname(dunder_file)))
try:
return _get_version_from_version_file(path) or _get_version_from_git_tag(path)
except CalledProcessError as e:
log.warn(repr(e))
return None
except Exception as e:
log.exception(e)
return None | Returns a version string for the current package, derived
either from git or from a .version file.
This function is expected to run in two contexts. In a development
context, where .git/ exists, the version is pulled from git tags.
Using the BuildPyCommand and SDistCommand classes for cmdclass in
setup.py will write a .version file into any dist.
In an installed context, the .version file written at dist build
time is the source of version information. |
375,770 | def int_to_varbyte(self, value):
length = int(log(max(value, 1), 0x80)) + 1
bytes = [value >> i * 7 & 0x7F for i in range(length)]
bytes.reverse()
for i in range(len(bytes) - 1):
bytes[i] = bytes[i] | 0x80
return pack('%sB' % len(bytes), *bytes) | Convert an integer into a variable length byte.
How it works: the bytes are stored in big-endian (significant bit
first), the highest bit of the byte (mask 0x80) is set when there
are more bytes following. The remaining 7 bits (mask 0x7F) are used
to store the value. |
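Assuming the stripped pack format above is `'%sB' % len(bytes)` (unsigned bytes), this is the usual MIDI-style variable-length encoding; a quick check of the byte layout for 4095:

```python
from struct import pack

# 4095 = 0b0011111_1111111 -> 7-bit groups 0x1F and 0x7F;
# every group except the last carries the 0x80 continuation bit.
groups = [0x1F | 0x80, 0x7F]
assert pack('%sB' % len(groups), *groups) == b'\x9f\x7f'
```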
375,771 | def load_edited_source(self, source, good_cb=None, bad_cb=None, filename=None):
with LiveExecution.lock:
self.good_cb = good_cb
self.bad_cb = bad_cb
try:
compile(source + '\n', filename or self.filename, "exec")
self.edited_source = source
except Exception as e:
if bad_cb:
self.edited_source = None
tb = traceback.format_exc()
self.call_bad_cb(tb)
return
if filename is not None:
self.filename = filename | Load changed code into the execution environment.
Until the code is executed correctly, it will be
in the 'tenuous' state. |
375,772 | def ctype_class(self):
def struct_factory(field_types):
class Struct(Structure):
_fields_ = [(str(i), t.ctype_class)
for i, t in enumerate(field_types)]
return Struct
if frozenset(self.field_types) not in WeldVec._singletons:
WeldStruct._singletons[
frozenset(self.field_types)] = struct_factory(self.field_types)
return WeldStruct._singletons[frozenset(self.field_types)] | Summary
Returns:
TYPE: Description |
375,773 | def get_raw(config, backend_section, arthur):
if arthur:
task = TaskRawDataArthurCollection(config, backend_section=backend_section)
else:
task = TaskRawDataCollection(config, backend_section=backend_section)
TaskProjects(config).execute()
try:
task.execute()
logging.info("Loading raw data finished!")
except Exception as e:
logging.error(str(e))
sys.exit(-1) | Execute the raw phase for a given backend section, optionally using Arthur
:param config: a Mordred config object
:param backend_section: the backend section where the raw phase is executed
:param arthur: if true, it enables Arthur to collect the raw data |
375,774 | def scale_in(self, blocks=None, block_ids=[]):
if block_ids:
block_ids_to_kill = block_ids
else:
block_ids_to_kill = list(self.blocks.keys())[:blocks]
for block_id in block_ids_to_kill:
self._hold_block(block_id)
to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
if self.provider:
r = self.provider.cancel(to_kill)
return r | Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
block_ids : list
List of specific block ids to terminate. Optional
Raises:
NotImplementedError |
375,775 | def get_threads(session, query):
response = make_get_request(session, , params_data=query)
json_data = response.json()
if response.status_code == 200:
return json_data[]
else:
raise ThreadsNotFoundException(
message=json_data[],
error_code=json_data[],
request_id=json_data[]
) | Get one or more threads |
375,776 | def new_fills_report(self,
start_date,
end_date,
account_id=None,
product_id=,
format=None,
email=None):
return self._new_report(start_date,
,
end_date,
account_id,
product_id,
format,
email) | `<https://docs.exchange.coinbase.com/#create-a-new-report>`_ |
375,777 | def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
dtype_numeric = dtype == "numeric"
if sp.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array | Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X. |
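For instance, assuming the module-level helpers it references are available, a nested list passes straight through and a 1D input is promoted by `ensure_2d`:

```python
X = check_array([[1.0, 2.0], [3.0, 4.0]])
print(X.shape)                       # (2, 2)
print(check_array([1, 2, 3]).shape)  # (1, 3)
```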
375,778 | def stdev(requestContext, seriesList, points, windowTolerance=0.1):
for seriesIndex, series in enumerate(seriesList):
stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)),
series.start, series.end, series.step, [])
stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name,
int(points))
validPoints = 0
currentSum = 0
currentSumOfSquares = 0
for index, newValue in enumerate(series):
if index < points:
bootstrapping = True
droppedValue = None
else:
bootstrapping = False
droppedValue = series[index - points]
if not bootstrapping and droppedValue is not None:
validPoints -= 1
if newValue is not None:
validPoints += 1
if not bootstrapping and droppedValue is not None:
currentSum -= droppedValue
currentSumOfSquares -= droppedValue**2
if newValue is not None:
currentSum += newValue
currentSumOfSquares += newValue**2
if (
validPoints > 0 and
float(validPoints) / points >= windowTolerance
):
try:
deviation = math.sqrt(validPoints * currentSumOfSquares -
currentSum**2) / validPoints
except ValueError:
deviation = None
stdevSeries.append(deviation)
else:
stdevSeries.append(None)
seriesList[seriesIndex] = stdevSeries
return seriesList | Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N
datapoints. If the ratio of null points in the window is greater than
windowTolerance, skip the calculation. The default for windowTolerance is
0.1 (up to 10% of points in the window can be missing). Note that if this
is set to 0.0, it will cause large gaps in the output anywhere a single
point is missing.
Example::
&target=stdev(server*.instance*.threads.busy,30)
&target=stdev(server*.instance*.cpu.system,30,0.0) |
375,779 | def compileSass(sassPath):
cssPath = os.path.splitext(sassPath)[0] + ".css"
print("Compiling Sass")
process = subprocess.Popen(["sass", sassPath, cssPath])
process.wait() | Compile a sass file (and dependencies) into a single css file. |
375,780 | def get_overlapping_ranges(self, collection_link, partition_key_ranges):
cl = self._documentClient
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
collection_routing_map = self._collection_routing_map_by_item.get(collection_id)
if collection_routing_map is None:
collection_pk_ranges = list(cl._ReadPartitionKeyRanges(collection_link))
collection_pk_ranges = _PartitionKeyRangeCache._discard_parent_ranges(collection_pk_ranges)
collection_routing_map = _CollectionRoutingMap.CompleteRoutingMap([(r, True) for r in collection_pk_ranges], collection_id)
self._collection_routing_map_by_item[collection_id] = collection_routing_map
return collection_routing_map.get_overlapping_ranges(partition_key_ranges) | Given a partition key range and a collection,
returns the list of overlapping partition key ranges
:param str collection_link:
The name of the collection.
:param list partition_key_range:
List of partition key range.
:return:
List of overlapping partition key ranges.
:rtype: list |
375,781 | def _friendlyAuthError(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
logger.error()
elif e.response.status_code == requests.codes.bad and in e.response.text.lower():
logger.error(, e.response.status_code, e.response.text)
logger.error()
else:
logger.error(, e.response.status_code, e.response.text)
raise
return wrapped | Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. |
375,782 | def extract_kwargs(names:Collection[str], kwargs:KWArgs):
"Extract the keys in `names` from the `kwargs`."
new_kwargs = {}
for arg_name in names:
if arg_name in kwargs:
arg_val = kwargs.pop(arg_name)
new_kwargs[arg_name] = arg_val
return new_kwargs, kwargs | Extract the keys in `names` from the `kwargs`. |
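For example, splitting a kwargs dict into the recognised and leftover parts (note that the input dict is mutated in place by `pop`):

```python
picked, rest = extract_kwargs(['bs', 'num_workers'], {'bs': 64, 'shuffle': True})
print(picked)  # {'bs': 64}
print(rest)    # {'shuffle': True}
```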
375,783 | def unregister_counter_nonzero(network):
if not hasattr(network, "__counter_nonzero_handles__"):
raise ValueError("register_counter_nonzero was not called for this network")
for h in network.__counter_nonzero_handles__:
h.remove()
delattr(network, "__counter_nonzero_handles__")
for module in network.modules():
if hasattr(module, "__counter_nonzero__"):
delattr(module, "__counter_nonzero__") | Unregister nonzero counter hooks
:param network: The network previously registered via `register_nonzero_counter` |
375,784 | def cli(ctx, feature_id, organism="", sequence=""):
return ctx.gi.annotations.get_feature_sequence(feature_id, organism=organism, sequence=sequence) | [CURRENTLY BROKEN] Get the sequence of a feature
Output:
A standard apollo feature dictionary ({"features": [{...}]}) |
375,785 | def ParseOptions(self, options):
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=[])
self._ReadParserPresetsFromFile()
argument_helper_names = [, , ]
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self._ParseTimezoneOption(options)
self.list_hashers = self._hasher_names_string ==
self.list_parsers_and_plugins = self._parser_filter_expression ==
self.list_profilers = self._profilers ==
self.show_info = getattr(options, , False)
self.show_troubleshooting = getattr(options, , False)
if getattr(options, , False):
self._views_format_type = views.ViewsFactory.FORMAT_TYPE_MARKDOWN
self.dependencies_check = getattr(options, , True)
if (self.list_hashers or self.list_parsers_and_plugins or
self.list_profilers or self.list_timezones or self.show_info or
self.show_troubleshooting):
return
self._ParseInformationalOptions(options)
argument_helper_names = [
, , ,
, , , ,
, ]
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._ParsePerformanceOptions(options)
self._ParseProcessingOptions(options)
if not self._storage_file_path:
raise errors.BadConfigOption()
serializer_format = getattr(
options, , definitions.SERIALIZER_FORMAT_JSON)
if serializer_format not in definitions.SERIALIZER_FORMATS:
raise errors.BadConfigOption(
.format(
serializer_format))
self._storage_serializer_format = serializer_format
self._operating_system = getattr(options, , None)
if self._operating_system:
self._mount_path = getattr(options, , None)
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=[])
self._enable_sigsegv_handler = getattr(options, , False)
self._EnforceProcessMemoryLimit(self._process_memory_limit) | Parses the options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. |
375,786 | def get_documents_in_database(self, with_id=True):
documents = []
for coll in self.get_collection_names():
documents += self.get_documents_in_collection(
coll,
with_id=with_id
)
return documents | Gets all documents in database
:param with_id: True iff each document should also come with its id
:return: List of documents in collection in database |
375,787 | def default_if_empty(self, default):
if self.closed():
raise ValueError("Attempt to call default_if_empty() on a "
"closed Queryable.")
return self._create(self._generate_default_if_empty_result(default)) | If the source sequence is empty return a single element sequence
containing the supplied default value, otherwise return the source
sequence unchanged.
Note: This method uses deferred execution.
Args:
default: The element to be returned if the source sequence is empty.
Returns:
The source sequence, or if the source sequence is empty an sequence
containing a single element with the supplied default value.
Raises:
ValueError: If the Queryable has been closed. |
375,788 | def from_poppy_creature(cls, poppy, motors, passiv, tip,
reversed_motors=[]):
chain_elements = get_chain_from_joints(poppy.urdf_file,
[m.name for m in motors])
activ = [False] + [m not in passiv for m in motors] + [True]
chain = cls.from_urdf_file(poppy.urdf_file,
base_elements=chain_elements,
last_link_vector=tip,
active_links_mask=activ)
chain.motors = [getattr(poppy, l.name) for l in chain.links[1:-1]]
for m, l in zip(chain.motors, chain.links[1:-1]):
m.angle_limit
bounds = m.__dict__[], m.__dict__[]
l.bounds = tuple(map(rad2deg, bounds))
chain._reversed = array([(-1 if m in reversed_motors else 1)
for m in motors])
return chain | Creates a kinematic chain from motors of a Poppy Creature.
:param poppy: PoppyCreature used
:param list motors: list of all motors that compose the kinematic chain
:param list passiv: list of motors which are passive in the chain (they will not move)
:param list tip: [x, y, z] translation of the tip of the chain (in meters)
:param list reversed_motors: list of motors that should be manually reversed (due to a problem in the URDF?) |
375,789 | def get_field_from_args_or_session(config, args, field_name):
rez = getattr(args, field_name, None)
if (rez != None):
return rez
rez = config.get_session_field("default_%s"%field_name, exception_if_not_found=False)
if (rez):
return rez
raise Exception("Fail to get default_%s from config, should specify %s via --%s parameter"%(field_name, field_name, field_name.replace("_","-"))) | We try to get field_name from diffent sources:
The order of priorioty is following:
- command line argument (--<field_name>)
- current session configuration (default_<filed_name>) |
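A minimal sketch of the same precedence rule (command-line value first, then a stored session default), with a stand-in config object since the real one is not shown here:

import argparse

class SessionConfig:
    def __init__(self, defaults):
        self._defaults = defaults

    def get_session_field(self, name, exception_if_not_found=True):
        if name in self._defaults:
            return self._defaults[name]
        if exception_if_not_found:
            raise KeyError(name)
        return None

def field_from_args_or_session(config, args, field_name):
    value = getattr(args, field_name, None)
    if value is not None:
        return value
    value = config.get_session_field('default_%s' % field_name,
                                     exception_if_not_found=False)
    if value:
        return value
    raise Exception('Specify %s via --%s' % (field_name, field_name.replace('_', '-')))

config = SessionConfig({'default_cluster_id': 'c-123'})
args = argparse.Namespace(cluster_id=None)
print(field_from_args_or_session(config, args, 'cluster_id'))  # falls back to 'c-123'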
375,790 | def validate(self, expectations_config=None, evaluation_parameters=None, catch_exceptions=True, result_format=None, only_return_failures=False):
results = []
if expectations_config is None:
expectations_config = self.get_expectations_config(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_include_configs_kwargs=False,
discard_catch_exceptions_kwargs=False,
)
elif isinstance(expectations_config, string_types):
expectations_config = json.load(open(expectations_config, 'r'))
if evaluation_parameters is None:
if "evaluation_parameters" in expectations_config:
evaluation_parameters = expectations_config["evaluation_parameters"]
try:
if expectations_config['meta']['great_expectations.__version__'] != __version__:
warnings.warn(
"WARNING: This configuration object was built using version %s of great_expectations, but is currently being validated by version %s." % (expectations_config['meta']['great_expectations.__version__'], __version__))
except KeyError:
warnings.warn(
"WARNING: No great_expectations version found in configuration object.")
for expectation in expectations_config['expectations']:
try:
expectation_method = getattr(
self, expectation['expectation_type'])
if result_format is not None:
expectation['kwargs'].update({'result_format': result_format})
if expectation[] in [,
]:
expectation['kwargs']['result_format'] = parse_result_format(expectation['kwargs']['result_format'])
expectation['kwargs']['result_format']['partial_unexpected_count'] = 0
evaluation_args = self._build_evaluation_parameters(
expectation['kwargs'], evaluation_parameters)
result = expectation_method(
catch_exceptions=catch_exceptions,
**evaluation_args
)
except Exception as err:
if catch_exceptions:
raised_exception = True
exception_traceback = traceback.format_exc()
result = {
"success": False,
"exception_info": {
"raised_exception": raised_exception,
"exception_traceback": exception_traceback,
"exception_message": str(err)
}
}
else:
raise(err)
result["expectation_config"] = copy.deepcopy(expectation)
if catch_exceptions and ('exception_info' not in result):
result["exception_info"] = {
"raised_exception": False,
"exception_traceback": None,
"exception_message": None
}
results.append(result)
statistics = _calc_validation_statistics(results)
if only_return_failures:
abbrev_results = []
for exp in results:
if exp["success"] == False:
abbrev_results.append(exp)
results = abbrev_results
result = {
"results": results,
"success": statistics.success,
"statistics": {
"evaluated_expectations": statistics.evaluated_expectations,
"successful_expectations": statistics.successful_expectations,
"unsuccessful_expectations": statistics.unsuccessful_expectations,
"success_percent": statistics.success_percent,
}
}
if evaluation_parameters is not None:
result.update({"evaluation_parameters": evaluation_parameters})
return result | Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectations_config=None to validate the expectations config associated with the DataAsset.
Args:
expectations_config (json or None): \
If None, uses the expectations config generated with the DataAsset during the current session. \
If a JSON file, validates those expectations.
evaluation_parameters (dict or None): \
If None, uses the evaluation_parameters from the expectations_config provided or as part of the data_asset.
If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): \
If True, exceptions raised by tests will not end validation and will be described in the returned report.
result_format (string or None): \
If None, uses the default value ('BASIC' or as specified). \
If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', etc.).
include_config (boolean): \
If True, the returned results include the config information associated with each expectation, if \
it exists.
only_return_failures (boolean): \
If True, expectation results are only returned when ``success = False`` \
Returns:
A JSON-formatted dictionary containing a list of the validation results. \
An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
If the configuration object was built with a different version of great_expectations than the current environment, a warning is issued. \
If no version is found in the configuration object, a warning is issued.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError |
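A hedged usage sketch against the classic (pre-0.13) great_expectations Pandas API; module paths and report keys may differ between versions:

import pandas as pd
from great_expectations.dataset import PandasDataset

df = PandasDataset(pd.DataFrame({'age': [23, 35, 41]}))
df.expect_column_values_to_be_between('age', min_value=0, max_value=120)

report = df.validate(result_format='BASIC', only_return_failures=True)
print(report['success'], report['statistics']['success_percent'])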
375,791 | def parse_dash(string, width):
"parse dash pattern specified with string"
w = max(1, int(width + 0.5))
n = len(string)
result = []
for i, c in enumerate(string):
if c == " " and len(result):
result[-1] += w + 1
elif c == "_":
result.append(8*w)
result.append(4*w)
elif c == "-":
result.append(6*w)
result.append(4*w)
elif c == ",":
result.append(4*w)
result.append(4*w)
elif c == ".":
result.append(2*w)
result.append(4*w)
return result | parse dash pattern specified with string |
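The function is self-contained, so it can be exercised directly; each character contributes an (on, off) pair scaled by the rounded line width, and a space stretches the preceding gap:

# With width=2 the rounded width w is 2, so '-' -> (6*w, 4*w) and '.' -> (2*w, 4*w).
print(parse_dash('-.', 2))   # [12, 8, 4, 8]
# A space extends the previous gap by w + 1.
print(parse_dash('- .', 2))  # [12, 11, 4, 8]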
375,792 | def put(self, item):
if isinstance(item, self._item_class):
self._put_one(item)
elif isinstance(item, (list, tuple)):
self._put_many(item)
else:
raise RuntimeError('unsupported item type: %r' % type(item)) | store item in sqlite database
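The single-versus-batch dispatch is a common pattern; a stripped-down standalone version over sqlite3 (table and column names are made up for the example) might look like:

import sqlite3

class TupleStore:
    def __init__(self, path=':memory:'):
        self._conn = sqlite3.connect(path)
        self._conn.execute('CREATE TABLE IF NOT EXISTS items (value TEXT)')

    def _put_one(self, item):
        self._conn.execute('INSERT INTO items (value) VALUES (?)', (item,))

    def _put_many(self, items):
        self._conn.executemany('INSERT INTO items (value) VALUES (?)',
                               [(i,) for i in items])

    def put(self, item):
        # Accept either a single string or a list/tuple of strings.
        if isinstance(item, str):
            self._put_one(item)
        elif isinstance(item, (list, tuple)):
            self._put_many(item)
        else:
            raise RuntimeError('unsupported item type: %r' % type(item))

store = TupleStore()
store.put('a')
store.put(['b', 'c'])
print(store._conn.execute('SELECT count(*) FROM items').fetchone()[0])  # 3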
375,793 | def get_run_as_identifiers_stack(self):
session = self.get_session(False)
try:
return session.get_internal_attribute(self.run_as_identifiers_session_key)
except AttributeError:
return None | :returns: an IdentifierCollection |
375,794 | def colored(text, color=None, on_color=None, attrs=None):
if __ISON and os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text | Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green') |
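The ANSI mechanics the function relies on can be shown in a few self-contained lines (standard SGR codes; the real module also honours an environment switch to disable colouring):

COLORS = {'red': 31, 'green': 32, 'yellow': 33, 'blue': 34}
ATTRIBUTES = {'bold': 1, 'underline': 4}
RESET = '\033[0m'

def simple_colored(text, color=None, attrs=None):
    codes = ([COLORS[color]] if color else []) + [ATTRIBUTES[a] for a in (attrs or [])]
    for code in codes:
        text = '\033[%dm%s' % (code, text)
    return text + RESET

print(simple_colored('Hello, World!', 'red', ['bold']))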
375,795 | def DEBUG_ON_RESPONSE(self, statusCode, responseHeader, data):
if self.DEBUG_FLAG:
self._frameBuffer[self._frameCount][1:4] = [statusCode, responseHeader, data]
responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount | Update current frame with response
Current frame index will be attached to responseHeader |
375,796 | def __to_plain_containers(self,
container: Union[CommentedSeq, CommentedMap]
) -> Union[OrderedDict, list]:
if isinstance(container, CommentedMap):
new_container = OrderedDict()
for key, value_obj in container.items():
if (isinstance(value_obj, CommentedMap)
or isinstance(value_obj, CommentedSeq)):
new_container[key] = self.__to_plain_containers(value_obj)
else:
new_container[key] = value_obj
elif isinstance(container, CommentedSeq):
new_container = list()
for value_obj in container:
if (isinstance(value_obj, CommentedMap)
or isinstance(value_obj, CommentedSeq)):
new_container.append(self.__to_plain_containers(value_obj))
else:
new_container.append(value_obj)
return new_container | Converts any sequence or mapping to list or OrderedDict
Stops at anything that isn't a sequence or a mapping.
One day, we'll extract the comments and formatting and store \
them out-of-band.
Args:
container: The mapping or sequence of constructed subobjects to convert |
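The recursion generalises to any mapping or sequence; a version written against collections.abc, which does not need ruamel.yaml installed, is sketched below:

from collections import OrderedDict
from collections.abc import Mapping, Sequence

def to_plain_containers(node):
    # Strings are Sequences too, so exclude them from the generic case.
    if isinstance(node, Mapping):
        return OrderedDict((k, to_plain_containers(v)) for k, v in node.items())
    if isinstance(node, Sequence) and not isinstance(node, (str, bytes)):
        return [to_plain_containers(v) for v in node]
    return node

print(to_plain_containers({'a': [1, {'b': 2}]}))
# OrderedDict([('a', [1, OrderedDict([('b', 2)])])])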
375,797 | def p_expr_div_expr(p):
p[0] = Expr.makenode(Container(p[2], p.lineno(2)), p[1], p[3]) | expr : expr BAND expr
| expr BOR expr
| expr BXOR expr
| expr PLUS expr
| expr MINUS expr
| expr MUL expr
| expr DIV expr
| expr MOD expr
| expr POW expr
| expr LSHIFT expr
| expr RSHIFT expr
| pexpr BAND expr
| pexpr BOR expr
| pexpr BXOR expr
| pexpr PLUS expr
| pexpr MINUS expr
| pexpr MUL expr
| pexpr DIV expr
| pexpr MOD expr
| pexpr POW expr
| pexpr LSHIFT expr
| pexpr RSHIFT expr
| expr BAND pexpr
| expr BOR pexpr
| expr BXOR pexpr
| expr PLUS pexpr
| expr MINUS pexpr
| expr MUL pexpr
| expr DIV pexpr
| expr MOD pexpr
| expr POW pexpr
| expr LSHIFT pexpr
| expr RSHIFT pexpr
| pexpr BAND pexpr
| pexpr BOR pexpr
| pexpr BXOR pexpr
| pexpr PLUS pexpr
| pexpr MINUS pexpr
| pexpr MUL pexpr
| pexpr DIV pexpr
| pexpr MOD pexpr
| pexpr POW pexpr
| pexpr LSHIFT pexpr
| pexpr RSHIFT pexpr |
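In PLY the docstring of each p_* function is the grammar rule it implements. A minimal, self-contained parser using the same style of binary-operator rule (a toy grammar for illustration, not the original compiler's; requires the ply package):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS', 'MINUS', 'MUL', 'DIV')
t_PLUS, t_MINUS, t_MUL, t_DIV = r'\+', '-', r'\*', '/'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

precedence = (('left', 'PLUS', 'MINUS'), ('left', 'MUL', 'DIV'))

def p_expr_binop(p):
    """expr : expr PLUS expr
            | expr MINUS expr
            | expr MUL expr
            | expr DIV expr"""
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: a / b}
    p[0] = ops[p[2]](p[1], p[3])

def p_expr_number(p):
    """expr : NUMBER"""
    p[0] = p[1]

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('2+3*4'))  # 14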
375,798 | def channel_in_frame(channel, framefile):
channel = str(channel)
for name in iter_channel_names(framefile):
if channel == name:
return True
return False | Determine whether a channel is stored in this framefile
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`
name of channel to find
framefile : `str`
path of GWF file to test
Returns
-------
inframe : `bool`
whether this channel is included in the table of contents for
the given framefile |
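A hedged usage sketch, assuming gwpy with |LDAStools.frameCPP| available; the file and channel names are placeholders:

from gwpy.io.gwf import iter_channel_names

framefile = 'H-H1_TEST-1126259446-32.gwf'   # hypothetical GWF file
target = 'H1:GDS-CALIB_STRAIN'              # hypothetical channel name
print(any(name == target for name in iter_channel_names(framefile)))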
375,799 | def run_cell_magic(self, magic_name, line, cell):
fn = self.find_cell_magic(magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic function `%%%%%s` not found%s."
extra = '' if lm is None else (' (But line magic `%%%s` exists, did you mean that instead?)'
% magic_name )
error(etpl % (magic_name, extra))
else:
stack_depth = 2
magic_arg_s = self.var_expand(line, stack_depth)
with self.builtin_trap:
result = fn(line, cell)
return result | Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string. |
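A hedged usage sketch from inside an IPython or Jupyter session, the programmatic equivalent of a %%timeit cell:

ip = get_ipython()                     # only defined inside IPython
ip.run_cell_magic('timeit', '-n 100', 'sum(range(1000))')
# Equivalent to a cell starting with:  %%timeit -n 100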