Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
18,100 | def filter_zone(self, data):
if self.private_zone is not None:
if data[][] != self.str2bool(self.private_zone):
return False
if data[] != .format(self.domain):
return False
return True | Check if a zone is private |
18,101 | def get_userstable_data(self):
project_users = {}
project = self.tab_group.kwargs[]
try:
roles = api.keystone.role_list(self.request)
self._get_users_from_project(project_id=project.id,
roles=roles,
project_users=project_users)
self._get_users_from_groups(project_id=project.id,
roles=roles,
project_users=project_users)
except Exception:
exceptions.handle(self.request,
_("Unable to display the users of this project.")
)
return project_users.values() | Get users with roles on the project.
Roles can be applied directly on the project or through a group. |
18,102 | def _warn_silly_options(cls, args):
if in args.span_hosts_allow \
and not args.page_requisites:
_logger.warning(
_(
)
)
if in args.span_hosts_allow \
and not args.recursive:
_logger.warning(
_(
)
)
if args.warc_file and \
(args.http_proxy or args.https_proxy):
_logger.warning(_())
if (args.password or args.ftp_password or
args.http_password or args.proxy_password) and \
args.warc_file:
_logger.warning(
_()) | Print warnings about any options that may be silly. |
18,103 | def compare_variants_label_plot(data):
keys = OrderedDict()
keys[] = {: }
keys[] = {: }
pconfig = {
: ,
: ,
: ,
}
return bargraph.plot(data, cats=keys, pconfig=pconfig) | Return HTML for the Compare variants plot |
18,104 | def _compile_pfgen(self):
string =
self.pfgen = compile(eval(string), , ) | Post power flow computation for PV and SW |
18,105 | def DBObject(table_name, versioning=VersioningTypes.NONE):
def wrapped(cls):
field_names = set()
all_fields = []
for name in dir(cls):
fld = getattr(cls, name)
if fld and isinstance(fld, Field):
fld.name = name
all_fields.append(fld)
field_names.add(name)
def add_missing_field(name, default=, insert_pos=None):
if name not in field_names:
fld = Field(default=default)
fld.name = name
all_fields.insert(
len(all_fields) if insert_pos is None else insert_pos,
fld
)
add_missing_field(, insert_pos=0)
add_missing_field()
add_missing_field()
if versioning == VersioningTypes.DELTA_HISTORY:
add_missing_field(, default=list)
cls.__table_name__ = table_name
cls.__versioning__ = versioning
cls.__fields__ = all_fields
cls = DatabaseEnabled(cls)
if versioning == VersioningTypes.DELTA_HISTORY:
cls.save = _delta_save(cls.save)
return cls
return wrapped | Classes annotated with DBObject gain persistence methods. |
18,106 | def get_distbins(start=100, bins=2500, ratio=1.01):
b = np.ones(bins, dtype="float64")
b[0] = 100
for i in range(1, bins):
b[i] = b[i - 1] * ratio
bins = np.around(b).astype(dtype="int")
binsizes = np.diff(bins)
return bins, binsizes | Get exponentially sized distance bins. |
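A short usage sketch of get_distbins above (assuming the function is in scope and numpy is installed); the bin edges grow geometrically and np.diff gives the per-bin widths:

import numpy as np

bins, binsizes = get_distbins(start=100, bins=10, ratio=1.5)
print(bins)       # [100, 150, 225, 338, 506, 759, 1139, 1709, 2563, 3844]
print(binsizes)   # widths between consecutive edges (one fewer than len(bins))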
18,107 | def _rename_hstore_unique(self, old_table_name, new_table_name,
old_field, new_field, keys):
old_name = self._unique_constraint_name(
old_table_name, old_field, keys)
new_name = self._unique_constraint_name(
new_table_name, new_field, keys)
sql = self.sql_hstore_unique_rename.format(
old_name=self.quote_name(old_name),
new_name=self.quote_name(new_name)
)
self.execute(sql) | Renames an existing UNIQUE constraint for the specified
hstore keys. |
18,108 | def recurse_tree(app, env, src, dest, excludes, followlinks, force, dryrun, private, suffix):
if INITPY in os.listdir(src):
root_package = src.split(os.path.sep)[-1]
else:
root_package = None
toplevels = []
for root, subs, files in walk(src, followlinks=followlinks):
assert root == src and root_package is None
for py_file in py_files:
if not shall_skip(app, os.path.join(src, py_file), private):
module = os.path.splitext(py_file)[0]
create_module_file(app, env, root_package, module, dest, suffix, dryrun, force)
toplevels.append(module)
return toplevels | Look for every file in the directory tree and create the corresponding
ReST files.
:param app: the sphinx app
:type app: :class:`sphinx.application.Sphinx`
:param env: the jinja environment
:type env: :class:`jinja2.Environment`
:param src: the path to the python source files
:type src: :class:`str`
:param dest: the output directory
:type dest: :class:`str`
:param excludes: the paths to exclude
:type excludes: :class:`list`
:param followlinks: follow symbolic links
:type followlinks: :class:`bool`
:param force: overwrite existing files
:type force: :class:`bool`
:param dryrun: do not generate files
:type dryrun: :class:`bool`
:param private: include "_private" modules
:type private: :class:`bool`
:param suffix: the file extension
:type suffix: :class:`str` |
18,109 | def generate_nodes(self, topology):
nodes = []
devices = topology[]
hypervisors = topology[]
for device in sorted(devices):
hv_id = devices[device][]
try:
tmp_node = Node(hypervisors[hv_id], self.port_id)
except IndexError:
tmp_node = Node({}, self.port_id)
tmp_node.node[][] = device
tmp_node.node[] = devices[device][]
tmp_node.node[] = devices[device][]
tmp_node.node[] = devices[device][]
tmp_node.device_info[] = devices[device][]
tmp_node.device_info[] = devices[device][]
tmp_node.device_info[] = devices[device][]
if in devices[device]:
tmp_node.device_info[] = devices[device][]
tmp_node.node[][] = device
if in devices[device] and in devices[device]:
tmp_node.node[][] = devices[device][]
tmp_node.node[][] = devices[device][]
if in devices[device]:
tmp_node.device_info[] = devices[device][]
else:
tmp_node.device_info[] =
tmp_node.set_description()
tmp_node.set_type()
for item in sorted(devices[device]):
tmp_node.add_device_items(item, devices[device])
if tmp_node.device_info[] == :
tmp_node.add_info_from_hv()
tmp_node.node[] = devices[device][]
tmp_node.calc_mb_ports()
for item in sorted(tmp_node.node[]):
if item.startswith():
tmp_node.add_slot_ports(item)
elif item.startswith():
tmp_node.add_wic_ports(item)
if tmp_node.device_info[] == :
pass
elif tmp_node.device_info[] == \
and tmp_node.device_info[] == :
tmp_node.node[][] =
tmp_node.calc_device_links()
elif tmp_node.device_info[] == :
try:
tmp_node.calc_cloud_connection()
except RuntimeError as err:
print(err)
elif tmp_node.device_info[] == :
tmp_node.process_mappings()
elif tmp_node.device_info[] == :
tmp_node.add_to_virtualbox()
tmp_node.add_vm_ethernet_ports()
tmp_node.calc_device_links()
elif tmp_node.device_info[] == :
tmp_node.add_to_qemu()
tmp_node.set_qemu_symbol()
tmp_node.add_vm_ethernet_ports()
tmp_node.calc_device_links()
self.links.extend(tmp_node.links)
self.configs.extend(tmp_node.config)
self.port_id += tmp_node.get_nb_added_ports(self.port_id)
nodes.append(tmp_node.node)
return nodes | Generate a list of nodes for the new topology
:param dict topology: processed topology from
:py:meth:`process_topology`
:return: a list of node dicts
:rtype: list |
18,110 | def list_symbols(self, partial_match=None):
symbols = self._symbols.distinct(SYMBOL)
if partial_match is None:
return symbols
return [x for x in symbols if partial_match in x] | Returns all symbols in the library
Parameters
----------
partial_match: None or str
if not none, use this string to do a partial match on symbol names
Returns
-------
list of str |
18,111 | def apply_layout(self, child, layout):
params = self.create_layout_params(child, layout)
w = child.widget
if w:
if layout.get():
dp = self.dp
l, t, r, b = layout[]
w.setPadding(int(l*dp), int(t*dp),
int(r*dp), int(b*dp))
child.layout_params = params | Apply the flexbox specific layout. |
18,112 | def update(
self, alert_condition_nrql_id, policy_id, name=None, threshold_type=None, query=None,
since_value=None, terms=None, expected_groups=None, value_function=None,
runbook_url=None, ignore_overlap=None, enabled=True):
conditions_nrql_dict = self.list(policy_id)
target_condition_nrql = None
for condition in conditions_nrql_dict[]:
if int(condition[]) == alert_condition_nrql_id:
target_condition_nrql = condition
break
if target_condition_nrql is None:
raise NoEntityException(
.format(
policy_id,
alert_condition_nrql_id
)
)
data = {
: {
: threshold_type or target_condition_nrql[],
: target_condition_nrql[],
: name or target_condition_nrql[],
: terms or target_condition_nrql[],
: {
: query or target_condition_nrql[][],
: since_value or target_condition_nrql[][],
}
}
}
if enabled is not None:
data[][] = str(enabled).lower()
if runbook_url is not None:
data[][] = runbook_url
elif in target_condition_nrql:
data[][] = target_condition_nrql[]
if expected_groups is not None:
data[][] = expected_groups
elif in target_condition_nrql:
data[][] = target_condition_nrql[]
if ignore_overlap is not None:
data[][] = ignore_overlap
elif in target_condition_nrql:
data[][] = target_condition_nrql[]
if value_function is not None:
data[][] = value_function
elif in target_condition_nrql:
data[][] = target_condition_nrql[]
if data[][] == :
if not in data[]:
raise ConfigurationException(
)
data[].pop(, None)
data[].pop(, None)
elif data[][] == :
if not in data[]:
raise ConfigurationException(
)
if not in data[]:
raise ConfigurationException(
)
data[].pop(, None)
return self._put(
url=.format(self.URL, alert_condition_nrql_id),
headers=self.headers,
data=data
) | Updates any of the optional parameters of the alert condition nrql
:type alert_condition_nrql_id: int
:param alert_condition_nrql_id: Alerts condition NRQL id to update
:type policy_id: int
:param policy_id: Alert policy id where target alert condition belongs to
:type condition_scope: str
:param condition_scope: The scope of the condition, can be instance or application
:type name: str
:param name: The name of the alert
:type threshold_type: str
:param threshold_type: The threshold_type of the condition, can be static or outlier
:type query: str
:param query: nrql query for the alerts
:type since_value: str
:param since_value: since value for the alert
:type terms: list[hash]
:param terms: list of hashes containing threshold config for the alert
:type expected_groups: int
:param expected_groups: expected groups setting for outlier alerts
:type value_function: str
:param value_function: value function for static alerts
:type runbook_url: str
:param runbook_url: The url of the runbook
:type ignore_overlap: bool
:param ignore_overlap: Whether to ignore overlaps for outlier alerts
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
:raises: This will raise a
:class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
if target alert condition is not included in target policy
:raises: This will raise a
:class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
if metric is set as user_defined but user_defined config is not passed
::
{
"nrql_condition": {
"name": "string",
"runbook_url": "string",
"enabled": "boolean",
"expected_groups": "integer",
"ignore_overlap": "boolean",
"value_function": "string",
"terms": [
{
"duration": "string",
"operator": "string",
"priority": "string",
"threshold": "string",
"time_function": "string"
}
],
"nrql": {
"query": "string",
"since_value": "string"
}
}
} |
18,113 | def tradingStatusDF(symbol=None, token=, version=):
x = tradingStatus(symbol, token, version)
data = []
for key in x:
d = x[key]
d[] = key
data.append(d)
df = pd.DataFrame(data)
_toDatetime(df)
return df | The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result |
18,114 | def _s3_intermediate_upload(file_obj, file_name, fields, session, callback_url):
import boto3
from boto3.s3.transfer import TransferConfig
from boto3.exceptions import S3UploadFailedError
client = boto3.client(
"s3",
aws_access_key_id=fields["upload_aws_access_key_id"],
aws_secret_access_key=fields["upload_aws_secret_access_key"],
)
config = TransferConfig(use_threads=False)
boto_kwargs = {}
if hasattr(file_obj, "progressbar"):
boto_kwargs["Callback"] = file_obj.progressbar.update
file_obj.progressbar = None
try:
client.upload_fileobj(
file_obj,
fields["s3_bucket"],
fields["file_id"],
ExtraArgs={"ServerSideEncryption": "AES256"},
Config=config,
**boto_kwargs
)
except S3UploadFailedError:
raise_connectivity_error(file_name)
try:
resp = session.post(
callback_url,
json={
"s3_path": "s3://{}/{}".format(fields["s3_bucket"], fields["file_id"]),
"filename": file_name,
"import_as_document": fields.get("import_as_document", False),
},
)
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
if resp.status_code != 200:
raise_connectivity_error(file_name)
try:
return resp.json()
except ValueError:
return {} | Uploads a single file-like object to an intermediate S3 bucket which One Codex can pull from
after receiving a callback.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST.
callback_url : `string`
API callback at One Codex which will trigger a pull from this S3 bucket.
Raises
------
UploadException
In the case of a fatal exception during an upload. Note we rely on boto3 to handle its own retry logic.
Returns
-------
`dict` : JSON results from internal confirm import callback URL |
18,115 | def predict(self, x_test):
if self.model:
lengths = map(len, x_test)
x_test = self.p.transform(x_test)
y_pred = self.model.predict(x_test)
y_pred = self.p.inverse_transform(y_pred, lengths)
return y_pred
else:
raise OSError() | Returns the prediction of the model on the given test data.
Args:
x_test : array-like, shape = (n_samples, sent_length)
Test samples.
Returns:
y_pred : array-like, shape = (n_samples, sent_length)
Prediction labels for x. |
18,116 | def isSameStatementList(stmListA: List[HdlStatement],
stmListB: List[HdlStatement]) -> bool:
if stmListA is stmListB:
return True
if stmListA is None or stmListB is None:
return False
for a, b in zip(stmListA, stmListB):
if not a.isSame(b):
return False
return True | :return: True if two lists of HdlStatement instances are same |
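A minimal sketch of how the comparison above behaves, using a hypothetical stub in place of HdlStatement (any object exposing an isSame() method works at runtime, assuming the function is importable):

class _StubStatement:
    # Hypothetical stand-in exposing the isSame() interface used above.
    def __init__(self, value):
        self.value = value

    def isSame(self, other):
        return self.value == other.value

a = [_StubStatement(1), _StubStatement(2)]
b = [_StubStatement(1), _StubStatement(2)]
print(isSameStatementList(a, b))     # True
print(isSameStatementList(a, None))  # False
print(isSameStatementList(a, a))     # True -- identity short-circuits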
18,117 | def _copy_across(self, rel_path, cb=None):
from . import copy_file_or_flo
if not self.upstream.has(rel_path):
if not self.alternate.has(rel_path):
return None
source = self.alternate.get_stream(rel_path)
sink = self.upstream.put_stream(rel_path, metadata=source.meta)
try:
copy_file_or_flo(source, sink, cb=cb)
except:
self.upstream.remove(rel_path, propagate=True)
raise
source.close()
sink.close() | If the upstream doesn't have the file, get it from the alternate and store it in the upstream |
18,118 | def distance_to(self, other_catchment):
try:
if self.country == other_catchment.country:
try:
return 0.001 * hypot(self.descriptors.centroid_ngr.x - other_catchment.descriptors.centroid_ngr.x,
self.descriptors.centroid_ngr.y - other_catchment.descriptors.centroid_ngr.y)
            except TypeError:
                # Assumed: missing centroid data gives an undefined distance.
                return float('nan')
        else:
            # Assumed: catchments in different countries are infinitely far apart.
            return float('inf')
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | Returns the distance between the centroids of two catchments in kilometers.
:param other_catchment: Catchment to calculate distance to
:type other_catchment: :class:`.Catchment`
:return: Distance between the catchments in km.
:rtype: float |
18,119 | def to_dict(self):
input_dict = super(Add, self)._save_to_input_dict()
input_dict["class"] = str("GPy.kern.Add")
return input_dict | Convert the object into a json serializable dictionary.
Note: It uses the private method _save_to_input_dict of the parent.
:return dict: json serializable dictionary containing the needed information to instantiate the object |
18,120 | def _metahash(self):
if self._cached_metahash:
return self._cached_metahash
log.debug(, self.address,
unicode(self.address))
mhash = util.hash_str(unicode(self.address))
log.debug(, self.address, self.rule.params)
mhash = util.hash_str(str(self.rule.params), hasher=mhash)
for src in self.rule.source_files or []:
log.debug(, self.address, src)
mhash = util.hash_str(src, hasher=mhash)
mhash = util.hash_file(self.srcs_map[src], hasher=mhash)
for dep in self.rule.composed_deps() or []:
dep_rule = self.rule.subgraph.node[dep][]
for item in dep_rule.output_files:
log.debug(, self.address, item)
item_path = os.path.join(self.buildroot, item)
mhash = util.hash_str(item, hasher=mhash)
mhash = util.hash_file(item_path, hasher=mhash)
self._cached_metahash = mhash
return mhash | Checksum hash of all the inputs to this rule.
Output is invalid until collect_srcs and collect_deps have been run.
In theory, if this hash doesn't change, the outputs won't change
either, which makes it useful for caching. |
18,121 | def unescape(msg, extra_format_dict={}):
new_msg =
extra_format_dict.update(format_dict)
while len(msg):
char = msg[0]
msg = msg[1:]
if char == escape_character:
escape_key = msg[0]
msg = msg[1:]
if escape_key == escape_character:
new_msg += escape_character
elif escape_key == :
buf =
new_char =
while True:
new_char = msg[0]
msg = msg[1:]
if new_char == :
break
else:
buf += new_char
new_msg += _get_from_format_dict(extra_format_dict, buf)
else:
new_msg += _get_from_format_dict(extra_format_dict, escape_key)
if escape_key == :
fill_last = len(msg) and msg[0] in digits
colours, msg = extract_girc_colours(msg, fill_last)
new_msg += colours
else:
new_msg += char
return new_msg | Takes a girc-escaped message and returns a raw IRC message |
18,122 | async def on_step(self, iteration):
self.combinedActions = []
if self.supply_left < 5 and self.townhalls.exists and self.supply_used >= 14 and self.can_afford(UnitTypeId.SUPPLYDEPOT) and self.units(UnitTypeId.SUPPLYDEPOT).not_ready.amount + self.already_pending(UnitTypeId.SUPPLYDEPOT) < 1:
ws = self.workers.gathering
if ws:
w = ws.furthest_to(ws.center)
loc = await self.find_placement(UnitTypeId.SUPPLYDEPOT, w.position, placement_step=3)
if loc:
self.combinedActions.append(w.build(UnitTypeId.SUPPLYDEPOT, loc))
for depot in self.units(UnitTypeId.SUPPLYDEPOT).ready:
self.combinedActions.append(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
if self.units(UnitTypeId.BARRACKS).ready.exists and self.can_afford(UnitTypeId.ORBITALCOMMAND):
for cc in self.units(UnitTypeId.COMMANDCENTER).idle:
self.combinedActions.append(cc(AbilityId.UPGRADETOORBITAL_ORBITALCOMMAND))
if 1 <= self.townhalls.amount < 2 and self.already_pending(UnitTypeId.COMMANDCENTER) == 0 and self.can_afford(UnitTypeId.COMMANDCENTER):
next_expo = await self.get_next_expansion()
location = await self.find_placement(UnitTypeId.COMMANDCENTER, next_expo, placement_step=1)
if location:
w = self.select_build_worker(location)
if w and self.can_afford(UnitTypeId.COMMANDCENTER):
error = await self.do(w.build(UnitTypeId.COMMANDCENTER, location))
if error:
print(error)
if self.units.of_type([UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED, UnitTypeId.SUPPLYDEPOTDROP]).ready.exists and self.units(UnitTypeId.BARRACKS).amount + self.already_pending(UnitTypeId.BARRACKS) < 4 and self.can_afford(UnitTypeId.BARRACKS):
ws = self.workers.gathering
if ws and self.townhalls.exists:
w = ws.furthest_to(ws.center)
loc = await self.find_placement(UnitTypeId.BARRACKS, self.townhalls.random.position, placement_step=4)
if loc:
self.combinedActions.append(w.build(UnitTypeId.BARRACKS, loc))
if self.units(UnitTypeId.BARRACKS).amount > 0 and self.already_pending(UnitTypeId.REFINERY) < 1:
for th in self.townhalls:
vgs = self.state.vespene_geyser.closer_than(10, th)
for vg in vgs:
if await self.can_place(UnitTypeId.REFINERY, vg.position) and self.can_afford(UnitTypeId.REFINERY):
ws = self.workers.gathering
if ws.exists:
w = ws.closest_to(vg)
self.combinedActions.append(w.build(UnitTypeId.REFINERY, vg))
enemyThreatsVeryClose = self.known_enemy_units.filter(lambda x: x.can_attack_ground).closer_than(4.5, r)
if r.weapon_cooldown != 0 and enemyThreatsVeryClose.exists:
retreatPoints = self.neighbors8(r.position, distance=2) | self.neighbors8(r.position, distance=4)
retreatPoints = {x for x in retreatPoints if self.inPathingGrid(x)}
if retreatPoints:
closestEnemy = enemyThreatsVeryClose.closest_to(r)
retreatPoint = max(retreatPoints, key=lambda x: x.distance_to(closestEnemy) - x.distance_to(r))
self.combinedActions.append(r.move(retreatPoint))
continue
self.combinedActions.append(r.move(random.choice(self.enemy_start_locations)))
if self.townhalls.exists:
for w in self.workers.idle:
th = self.townhalls.closest_to(w)
mfs = self.state.mineral_field.closer_than(10, th)
if mfs:
mf = mfs.closest_to(w)
self.combinedActions.append(w.gather(mf))
for oc in self.units(UnitTypeId.ORBITALCOMMAND).filter(lambda x: x.energy >= 50):
mfs = self.state.mineral_field.closer_than(10, oc)
if mfs:
mf = max(mfs, key=lambda x:x.mineral_contents)
self.combinedActions.append(oc(AbilityId.CALLDOWNMULE_CALLDOWNMULE, mf))
await self.do_actions(self.combinedActions) | - depots when low on remaining supply
- townhalls contains commandcenter and orbitalcommand
- self.units(TYPE).not_ready.amount selects all units of that type, filters incomplete units, and then counts the amount
- self.already_pending(TYPE) counts how many units are queued - but in this bot below you will find a slightly different already_pending function which only counts units queued (but not in construction) |
18,123 | def realpred(cls, lemma, pos, sense=None):
string_tokens = [lemma]
if pos is not None:
string_tokens.append(pos)
if sense is not None:
sense = str(sense)
string_tokens.append(sense)
predstr = .join([] + string_tokens + [])
return cls(Pred.REALPRED, lemma, pos, sense, predstr) | Instantiate a Pred from its components. |
18,124 | def step(self):
with self.__lock:
self.__value -= 1
if self.__value == 0:
self.__event.set()
return True
elif self.__value < 0:
raise ValueError("The counter has gone below 0")
return False | Decreases the internal counter. Raises an error if the counter goes
below 0
:return: True if this step was the final one, else False
:raise ValueError: The counter has gone below 0 |
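A minimal, self-contained re-implementation of the same countdown logic (hypothetical class name CountDownEvent), showing how step() is meant to be wired to a lock and an event:

import threading

class CountDownEvent:
    """Hypothetical helper: the event fires once step() has been called `count` times."""
    def __init__(self, count):
        self.__value = count
        self.__lock = threading.Lock()
        self.__event = threading.Event()

    def step(self):
        # Same logic as the snippet above.
        with self.__lock:
            self.__value -= 1
            if self.__value == 0:
                self.__event.set()
                return True
            elif self.__value < 0:
                raise ValueError("The counter has gone below 0")
            return False

    def wait(self, timeout=None):
        return self.__event.wait(timeout)

counter = CountDownEvent(2)
print(counter.step())   # False -- one step remaining
print(counter.step())   # True  -- final step, event is now set
print(counter.wait(0))  # True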
18,125 | def frames(self):
if not self._running:
raise RuntimeError( %(self._path_to_images))
if self._im_index >= self._num_images:
raise RuntimeError()
color_filename = os.path.join(self._path_to_images, %(self._im_index, self._color_ext))
color_im = ColorImage.open(color_filename, frame=self._frame)
depth_filename = os.path.join(self._path_to_images, %(self._im_index))
depth_im = DepthImage.open(depth_filename, frame=self._frame)
self._im_index = (self._im_index + 1) % self._num_images
return color_im, depth_im, None | Retrieve the next frame from the image directory and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
skip_registration : bool
If True, the registration step is skipped.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
Raises
------
RuntimeError
If the stream is not running or if all images in the
directory have been used. |
18,126 | def html(data, options, center=False, save=False,
save_name=None, save_path=, dated=True, notebook=True):
def json_dumps(obj):
return pd.io.json.dumps(obj)
_options = dict(options)
_data = data
def clean_function_str(key, n=15):
if key in _options.keys():
new_str = _options[key][:n].replace(, )
new_str = new_str.replace(, )
new_str = new_str + _options[key][n:]
_options[key] = new_str
clean_function_str()
clean_function_str()
clean_function_str()
clean_function_str()
chart_id = str(uuid.uuid4()).replace(, )
js_init = % (json_dumps(_options), JS_JSON_PARSE_OPTION,
json_dumps(_data), JS_JSON_PARSE_DATA)
js_call = % (chart_id)
if center:
if not in _options.keys():
_options[] =
html = % (chart_id, _options[])
else:
html = % (chart_id)
css = % (CSS_LIBS_ONE)
js = % (JS_LIBS_ONE, js_init, js_call)
js_load = .join([ % e for e in JS_SAVE])
if save == True:
if not os.path.exists(save_path):
os.makedirs(save_path)
tag = save_name if save_name else
dated = dt.datetime.now().strftime() if dated else
with open(os.path.join(save_path, tag + dated + ), ) as f:
f.write(js_load + html + js)
if notebook:
return html + js
else:
return js_load + html + js
return html + css + js | save=True will create a standalone HTML doc under localdir/saved (creating folfer save if necessary)
center=True will center the plot in the output cell, otherwise left-aligned by default. |
18,127 | def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except socket.error:
pass | Closes the tunnel. |
18,128 | def code_data_whitening(self, decoding, inpt):
inpt_copy = array.array("B", inpt)
return self.apply_data_whitening(decoding, inpt_copy) | XOR Data Whitening
:param decoding:
:param inpt:
:return: |
18,129 | def grant_admin_role(self):
apiuser = ApiUser(self._get_resource_root(), self.name, roles=[])
return self._put(, ApiUser, data=apiuser) | Grant admin access to a user. If the user already has admin access, this
does nothing. If the user currently has a non-admin role, it will be replaced
with the admin role.
@return: An ApiUser object |
18,130 | def listdict_to_listlist_and_matrix(sparse):
V = range(len(sparse))
graph = [[] for _ in V]
weight = [[None for v in V] for u in V]
for u in V:
for v in sparse[u]:
graph[u].append(v)
weight[u][v] = sparse[u][v]
return graph, weight | Transforms the adjacency list representation of a graph
of type listdict into the listlist + weight matrix representation
:param sparse: graph in listdict representation
:returns: couple with listlist representation, and weight matrix
:complexity: linear |
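A small usage example for the conversion above (assuming the function is in scope); the sparse graph has one edge 0→1 of weight 3 and one edge 1→2 of weight 5:

sparse = [{1: 3}, {2: 5}, {}]
graph, weight = listdict_to_listlist_and_matrix(sparse)
print(graph)           # [[1], [2], []]
print(weight[0][1])    # 3
print(weight[1][2])    # 5
print(weight[0][2])    # None -- no edge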
18,131 | def build_markdown_body(self, text):
key_options = {
: self.process_date_created,
: self.process_date_updated,
: self.process_title,
: self.process_input
}
for paragraph in text[]:
if in paragraph:
self.user = paragraph[]
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph) | Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format |
18,132 | def adafactor_optimizer_from_hparams(hparams, lr):
if hparams.optimizer_adafactor_decay_type == "adam":
decay_rate = adafactor_decay_rate_adam(
hparams.optimizer_adafactor_beta2)
elif hparams.optimizer_adafactor_decay_type == "pow":
decay_rate = adafactor_decay_rate_pow(
hparams.optimizer_adafactor_memory_exponent)
else:
raise ValueError("unknown optimizer_adafactor_decay_type")
if hparams.weight_dtype == "bfloat16":
parameter_encoding = quantization.EighthPowerEncoding()
else:
parameter_encoding = None
return AdafactorOptimizer(
multiply_by_parameter_scale=(
hparams.optimizer_adafactor_multiply_by_parameter_scale),
learning_rate=lr,
decay_rate=decay_rate,
beta1=hparams.optimizer_adafactor_beta1,
clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
factored=hparams.optimizer_adafactor_factored,
simulated_quantize_bits=getattr(
hparams, "simulated_parameter_quantize_bits", 0),
parameter_encoding=parameter_encoding,
use_locking=False,
name="Adafactor") | Create an Adafactor optimizer based on model hparams.
Args:
hparams: model hyperparameters
lr: learning rate scalar.
Returns:
an AdafactorOptimizer
Raises:
ValueError: on illegal values |
18,133 | def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
@container
def _validate(rh_method):
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
setattr(self, "body", input_)
output = rh_method(self, *args, **kwargs)
if is_future(output):
output = yield output
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
raise TypeError(str(e))
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate | Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (If schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contain the 'published' key with value False if none
comes from the request; this also works with nested schemas.
18,134 | def list_formats ():
print("Archive programs of", App)
print("Archive programs are searched in the following directories:")
print(util.system_search_path())
print()
for format in ArchiveFormats:
print(format, "files:")
for command in ArchiveCommands:
programs = ArchivePrograms[format]
if command not in programs and None not in programs:
print(" %8s: - (not supported)" % command)
continue
try:
program = find_archive_program(format, command)
print(" %8s: %s" % (command, program), end=)
if format == :
encs = [x for x in ArchiveCompressions if util.find_program(x)]
if encs:
print("(supported compressions: %s)" % ", ".join(encs), end=)
elif format == :
if util.p7zip_supports_rar():
print("(rar archives supported)", end=)
else:
print("(rar archives not supported)", end=)
print()
except util.PatoolError:
handlers = programs.get(None, programs.get(command))
print(" %8s: - (no program found; install %s)" %
(command, util.strlist_with_or(handlers))) | Print information about available archive formats to stdout. |
18,135 | def get_default_config(self):
config = super(MySQLCollector, self).get_default_config()
config.update({
: ,
: [],
: False,
: False,
: False,
})
return config | Returns the default collector settings |
18,136 | def pid(self):
if not self.base_pathname:
return None
try:
pidfile = os.path.join(self.base_pathname, )
return int(open(pidfile).readline())
except (IOError, OSError):
return None | The server's PID (None if not running). |
18,137 | def push(self, repository, stream=False, raise_on_error=True, **kwargs):
response = super(DockerClientWrapper, self).push(repository, stream=stream, **kwargs)
if stream:
result = self._docker_status_stream(response, raise_on_error)
else:
result = self._docker_status_stream(response.split() if response else (), raise_on_error)
return result and not result.get() | Pushes an image repository to the registry.
:param repository: Name of the repository (can include a tag).
:type repository: unicode | str
:param stream: Use the stream output format with additional status information.
:type stream: bool
:param raise_on_error: Raises errors in the status output as a DockerStatusException. Otherwise only logs
errors.
:type raise_on_error: bool
:param kwargs: Additional kwargs for :meth:`docker.client.Client.push`.
:return: ``True`` if the image has been pushed successfully.
:rtype: bool |
18,138 | def Subgroups(self):
return(Groups(alias=self.alias,groups_lst=self.data[],session=self.session)) | Returns a Groups object containing all child groups.
>>> clc.v2.Group("wa1-4416").Subgroups()
<clc.APIv2.group.Groups object at 0x105fa27d0> |
18,139 | def VerifySignature(self, message, signature, public_key, unhex=True):
return Crypto.VerifySignature(message, signature, public_key, unhex=unhex) | Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint): the public key to use for verifying the signature.
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise. |
18,140 | def evolution_strength_of_connection(A, B=None, epsilon=4.0, k=2,
proj_type="l2", block_flag=False,
symmetrize_measure=True):
from pyamg.util.utils import scale_rows, get_block_diag, scale_columns
from pyamg.util.linalg import approximate_spectral_radius
if epsilon < 1.0:
raise ValueError("expected epsilon > 1.0")
if k <= 0:
raise ValueError("number of time steps must be > 0")
    if proj_type not in ['l2', 'D_A']:
        raise ValueError("proj_type must be 'l2' or 'D_A'")
if (not sparse.isspmatrix_csr(A)) and (not sparse.isspmatrix_bsr(A)):
raise TypeError("expected csr_matrix or bsr_matrix")
if (not sparse.isspmatrix_csr(A)):
csrflag = False
numPDEs = A.blocksize[0]
D = A.diagonal()
if block_flag:
Dinv = get_block_diag(A, blocksize=numPDEs, inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0] + 1)),
shape=A.shape)
Dinv_A = (Dinv * A).tocsr()
else:
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A = A.tocsr()
else:
csrflag = True
numPDEs = 1
D = A.diagonal()
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
Dinv[D == 0] = 1.0
Dinv_A = scale_rows(A, Dinv, copy=True)
A.eliminate_zeros()
A.sort_indices()
dimen = A.shape[1]
    # Reconstructed from the docstring: B=None means the near nullspace vector is all ones.
    if B is None:
        B = np.ones((dimen, 1), dtype=A.dtype)
    Bmat = np.asarray(B)
    NullDim = Bmat.shape[1]
rho_DinvA = approximate_spectral_radius(Dinv_A)
if proj_type == "D_A":
        D_A = sparse.spdiags([D], [0], dimen, dimen, format='csr')
else:
D_A = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
nsquare = int(np.log2(k))
ninc = k - 2**nsquare
Id = sparse.eye(dimen, dimen, format="csr", dtype=A.dtype)
Atilde = (Id - (1.0 / rho_DinvA) * Dinv_A)
Atilde = Atilde.T.tocsr()
mask = A.copy()
if numPDEs > 1:
row_length = np.diff(mask.indptr)
my_pde = np.mod(np.arange(dimen), numPDEs)
my_pde = np.repeat(my_pde, row_length)
mask.data[np.mod(mask.indices, numPDEs) != my_pde] = 0.0
del row_length, my_pde
mask.eliminate_zeros()
if ninc > 0:
warn("The most efficient time stepping for the Evolution Strength\
Method is done in powers of two.\nYou have chosen " + str(k) +
" time steps.")
for i in range(nsquare):
Atilde = Atilde * Atilde
JacobiStep = (Id - (1.0 / rho_DinvA) * Dinv_A).T.tocsr()
for i in range(ninc):
Atilde = Atilde * JacobiStep
del JacobiStep
mask.data[:] = 1.0
Atilde = Atilde.multiply(mask)
Atilde.eliminate_zeros()
Atilde.sort_indices()
elif nsquare == 0:
if numPDEs > 1:
mask.data[:] = 1.0
Atilde = Atilde.multiply(mask)
Atilde.eliminate_zeros()
Atilde.sort_indices()
else:
for i in range(nsquare - 1):
Atilde = Atilde * Atilde
AtildeCSC = Atilde.tocsc()
AtildeCSC.sort_indices()
mask.sort_indices()
Atilde.sort_indices()
amg_core.incomplete_mat_mult_csr(Atilde.indptr, Atilde.indices,
Atilde.data, AtildeCSC.indptr,
AtildeCSC.indices, AtildeCSC.data,
mask.indptr, mask.indices, mask.data,
dimen)
del AtildeCSC, Atilde
Atilde = mask
Atilde.eliminate_zeros()
Atilde.sort_indices()
del Dinv, Dinv_A, mask
BDBCols = int(np.sum(np.arange(NullDim + 1)))
BDB = np.zeros((dimen, BDBCols), dtype=A.dtype)
counter = 0
for i in range(NullDim):
for j in range(i, NullDim):
BDB[:, counter] = 2.0 *\
(np.conjugate(np.ravel(np.asarray(B[:, i]))) *
np.ravel(np.asarray(D_A * B[:, j])))
counter = counter + 1
t = Atilde.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
    _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
tol = {0: feps * 1e3, 1: eps * 1e6, 2: geps * 1e6}[_array_precision[t]]
amg_core.evolution_strength_helper(Atilde.data,
Atilde.indptr,
Atilde.indices,
Atilde.shape[0],
np.ravel(np.asarray(B)),
np.ravel(np.asarray(
(D_A * np.conjugate(B)).T)),
np.ravel(np.asarray(BDB)),
BDBCols, NullDim, tol)
Atilde.eliminate_zeros()
Atilde.data = np.array(np.real(Atilde.data), dtype=float)
if epsilon != np.inf:
amg_core.apply_distance_filter(dimen, epsilon, Atilde.indptr,
Atilde.indices, Atilde.data)
Atilde.eliminate_zeros()
if symmetrize_measure:
Atilde = 0.5 * (Atilde + Atilde.T)
Id = sparse.eye(dimen, dimen, format="csr")
Id.data -= Atilde.diagonal()
Atilde = Atilde + Id
if not csrflag:
Atilde = Atilde.tobsr(blocksize=(numPDEs, numPDEs))
n_blocks = Atilde.indices.shape[0]
blocksize = Atilde.blocksize[0] * Atilde.blocksize[1]
CSRdata = np.zeros((n_blocks,))
amg_core.min_blocks(n_blocks, blocksize,
np.ravel(np.asarray(Atilde.data)), CSRdata)
Atilde = sparse.csr_matrix((CSRdata, Atilde.indices, Atilde.indptr),
shape=(int(Atilde.shape[0] / numPDEs),
int(Atilde.shape[1] / numPDEs)))
Atilde.data = 1.0 / Atilde.data
Atilde = scale_rows_by_largest_entry(Atilde)
return Atilde | Evolution Strength Measure.
Construct strength of connection matrix using an Evolution-based measure
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
B : string, array
If B=None, then the near nullspace vector used is all ones. If B is
an (NxK) array, then B is taken to be the near nullspace vectors.
epsilon : scalar
Drop tolerance
k : integer
ODE num time steps, step size is assumed to be 1/rho(DinvA)
proj_type : {'l2','D_A'}
Define norm for constrained min prob, i.e. define projection
block_flag : boolean
If True, use a block D inverse as preconditioner for A during
weighted-Jacobi
Returns
-------
Atilde : csr_matrix
Sparse matrix of strength values
See [2008OlScTu]_ for more details.
References
----------
.. [2008OlScTu] Olson, L. N., Schroder, J., Tuminaro, R. S.,
"A New Perspective on Strength Measures in Algebraic Multigrid",
submitted, June, 2008.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import evolution_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = evolution_strength_of_connection(A, np.ones((A.shape[0],1))) |
18,141 | def panes(self):
" List with all panes from this Window. "
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item)
return result | List with all panes from this Window. |
18,142 | def update_vip(self, vip, body=None):
return self.put(self.vip_path % (vip), body=body) | Updates a load balancer vip. |
18,143 | def parse(self):
index_server = None
for num, line in enumerate(self.iter_lines()):
line = line.rstrip()
if not line:
continue
if line.startswith():
continue
if line.startswith() or \
line.startswith() or \
line.startswith():
index_server = self.parse_index_server(line)
continue
elif self.obj.path and (line.startswith() or line.startswith()):
self.obj.resolved_files.append(self.resolve_file(self.obj.path, line))
elif line.startswith() or line.startswith() or \
line.startswith() or line.startswith() or \
line.startswith() or line.startswith() or \
line.startswith():
continue
elif self.is_marked_line(line):
continue
else:
try:
parseable_line = line
if "\\" in line:
parseable_line = line.replace("\\", "")
for next_line in self.iter_lines(num + 1):
parseable_line += next_line.strip().replace("\\", "")
line += "\n" + next_line
if "\\" in next_line:
continue
break
if self.is_marked_line(parseable_line):
continue
hashes = []
if "--hash" in parseable_line:
parseable_line, hashes = Parser.parse_hashes(parseable_line)
req = RequirementsTXTLineParser.parse(parseable_line)
if req:
req.hashes = hashes
req.index_server = index_server
req.line = line
self.obj.dependencies.append(req)
except ValueError:
continue | Parses a requirements.txt-like file |
18,144 | def _textio_iterlines(stream):
line = stream.readline()
    while line != '':
yield line
line = stream.readline() | Iterates over lines in a TextIO stream until an EOF is encountered.
This is the iterator version of stream.readlines() |
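A quick illustration using an in-memory stream (assuming the generator above is importable):

import io

stream = io.StringIO("first\nsecond\n")
for line in _textio_iterlines(stream):
    print(repr(line))   # 'first\n' then 'second\n'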
18,145 | def nearby_faces(mesh, points):
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError()
rtree = mesh.triangles_tree
kdtree = mesh.kdtree
distance_vertex = kdtree.query(points)[0].reshape((-1, 1))
distance_vertex += tol.merge
bounds = np.column_stack((points - distance_vertex,
points + distance_vertex))
candidates = [list(rtree.intersection(b)) for b in bounds]
return candidates | For each point find nearby faces relatively quickly.
The closest point on the mesh to the queried point is guaranteed to be
on one of the faces listed.
Does this by finding the nearest vertex on the mesh to each point, and
then returns all the faces that intersect the axis aligned bounding box
centered at the queried point and extending to the nearest vertex.
Parameters
----------
mesh : Trimesh object
points : (n,3) float , points in space
Returns
-----------
candidates : (points,) int, sequence of indexes for mesh.faces |
18,146 | def _create_RSA_private_key(self,
bytes):
try:
private_key = serialization.load_pem_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key
except Exception:
private_key = serialization.load_der_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key | Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes. |
18,147 | def __get_tax(self, account_id, **kwargs):
params = {
        'account_id': account_id
}
return self.make_call(self.__get_tax, params, kwargs) | Call documentation: `/account/get_tax
<https://www.wepay.com/developer/reference/account-2011-01-15#get_tax>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. warning ::
This call is deprecated as of API version '2014-01-08'. |
18,148 | def element_data_from_Z(Z):
if isinstance(Z, str) and Z.isdecimal():
Z = int(Z)
if Z not in _element_Z_map:
        raise KeyError('Element data for Z = {} not found'.format(Z))
return _element_Z_map[Z] | Obtain elemental data given a Z number
An exception is thrown if the Z number is not found |
18,149 | def _rsa_recover_prime_factors(n, e, d):
ktot = d * e - 1
t = ktot
while t % 2 == 0:
t = t // 2
spotted = False
a = 2
while not spotted and a < _MAX_RECOVERY_ATTEMPTS:
k = t
while k < ktot:
cand = pow(a, k, n)
if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
p = _gcd(cand + 1, n)
spotted = True
break
k *= 2
a += 2
if not spotted:
raise ValueError("Unable to compute factors p and q from exponent d.")
q, r = divmod(n, p)
assert r == 0
p, q = sorted((p, q), reverse=True)
return (p, q) | Compute factors p and q from the private exponent d. We assume that n has
no more than two factors. This function is adapted from code in PyCrypto. |
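A worked check on a toy key, assuming the module-level `_gcd` helper and `_MAX_RECOVERY_ATTEMPTS` constant referenced by the snippet are available: with p = 13, q = 11 we have n = 143, e = 7 and d = 103 (7 * 103 = 721 ≡ 1 mod 120).

p, q = _rsa_recover_prime_factors(n=143, e=7, d=103)
print(p, q)   # 13 11 -- factors are returned with the larger one first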
18,150 | def find_runner(program):
if os.path.isfile(program) and not os.access(program, os.X_OK):
try:
opened = open(program)
except PermissionError:
return None
first_line = opened.readline().strip()
        if first_line.startswith('#!'):
            return shlex.split(first_line[2:])
    if program.endswith('.py'):
return [sys.executable]
return None | Return a command that will run program.
Args:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None |
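A usage sketch (hypothetical script path; assuming the function above is importable): the returned list, if any, is prepended to the command line.

import subprocess

script = "scripts/migrate.py"          # hypothetical path
runner = find_runner(script)           # e.g. ['/usr/bin/env', 'python'] from a shebang,
                                       # [sys.executable] for a plain .py, or None
subprocess.call((runner or []) + [script])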
18,151 | def setup_versioned_routes(routes, version=None):
    prefix = '/' + version if version else ""
for r in routes:
path, method = r
route(prefix + path, method, routes[r]) | Set up routes with a version prefix. |
18,152 | def delete_operation(self, name):
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict |
18,153 | def debug_ratelimit(g):
assert isinstance(g, github.MainClass.Github), type(g)
debug("github ratelimit: {rl}".format(rl=g.rate_limiting)) | Log debug of github ratelimit information from last API call
Parameters
----------
org: github.MainClass.Github
github object |
18,154 | def sort_by_number_values(x00, y00):
if len(x00) < len(y00):
return 1
if len(x00) > len(y00):
return -1
return 0 | Compare x00, y00 base on number of values
:param x00: first elem to compare
:type x00: list
:param y00: second elem to compare
:type y00: list
:return: -1 if len(x00) > len(y00), 0 if the lengths are equal, 1 if len(x00) < len(y00)
:rtype: int |
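sort_by_number_values is an old-style cmp comparator (longer lists sort first); under Python 3 it has to be wrapped with functools.cmp_to_key, e.g.:

from functools import cmp_to_key

data = [[1], [1, 2, 3], [1, 2]]
print(sorted(data, key=cmp_to_key(sort_by_number_values)))
# [[1, 2, 3], [1, 2], [1]] -- descending by length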
18,155 | def config_get(self, param, default=None):
try:
return self("config", "--get", param,
log_fail=False, log_cmd=False)
except exception.CommandFailed:
return default | Return the value of a git configuration option. This will
return the value of the default parameter (which defaults to
None) if the given option does not exist. |
18,156 | def save(self, filename, binary=True):
filename = os.path.abspath(os.path.expanduser(filename))
    if '.vtk' in filename:
        writer = vtk.vtkRectilinearGridWriter()
        legacy = True
    elif '.vtr' in filename:
        writer = vtk.vtkXMLRectilinearGridWriter()
        legacy = False
    else:
        raise Exception('Extension should be either ".vtr" (xml) or '
                        '".vtk" (legacy)')
writer.SetFileName(filename)
writer.SetInputData(self)
if binary and legacy:
writer.SetFileTypeToBinary()
writer.Write() | Writes a rectilinear grid to disk.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use. ".vtk" will use the legacy writer, while
".vtr" will select the VTK XML writer.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII.
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can be used
only with the legacy writer. |
18,157 | def remove():
current = True
root = winreg.HKEY_CURRENT_USER if current else winreg.HKEY_LOCAL_MACHINE
for key in (KEY_C1 % ("", EWS), KEY_C1 % ("NoCon", EWS),
KEY_C0 % ("", EWS), KEY_C0 % ("NoCon", EWS)):
try:
winreg.DeleteKey(root, key)
except WindowsError:
pass
else:
if not is_bdist_wininst:
print("Successfully removed Spyder shortcuts from Windows "\
"Explorer context menu.", file=sys.stdout)
if not is_bdist_wininst:
desktop_folder = get_special_folder_path("CSIDL_DESKTOPDIRECTORY")
fname = osp.join(desktop_folder, )
if osp.isfile(fname):
try:
os.remove(fname)
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcuts from your desktop.",
file=sys.stdout)
start_menu = osp.join(get_special_folder_path(),
% (sys.version_info[0],
sys.version_info[1],
struct.calcsize()*8))
if osp.isdir(start_menu):
for fname in os.listdir(start_menu):
try:
os.remove(osp.join(start_menu,fname))
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcuts from your "\
" start menu.", file=sys.stdout)
try:
os.rmdir(start_menu)
except OSError:
print("Failed to remove %s; you may be able to remove it "\
"manually." % fname, file=sys.stderr)
else:
print("Successfully removed Spyder shortcut folder from your "\
" start menu.", file=sys.stdout) | Function executed when running the script with the -remove switch |
18,158 | def writelines(lines, filename, encoding='utf-8', mode='wb'):  # default literals assumed
return write(os.linesep.join(lines), filename, encoding, mode) | Write 'lines' to file ('filename') assuming 'encoding'
Return (eventually new) encoding |
18,159 | def weed(self):
_ext = [k for k in self._dict.keys() if k not in self.c_param]
for k in _ext:
del self._dict[k] | Get rid of key value pairs that are not standard |
18,160 | def get_data(self, path, **params):
xml = self.get_response(path, **params)
try:
return parse(xml)
except Exception as err:
print(path)
print(params)
print(err)
raise | Giving a service path and optional specific arguments, returns
the XML data from the API parsed as a dict structure. |
18,161 | def mock_bable(monkeypatch):
mocked_bable = MockBaBLE()
mocked_bable.set_controllers([
Controller(0, , ),
Controller(1, , , settings={: True, : True}),
Controller(2, , , settings={: True})
])
monkeypatch.setattr(bable_interface, , lambda: mocked_bable)
return mocked_bable | Mock the BaBLEInterface class with some controllers inside. |
18,162 | def arraydifference(X,Y):
if len(Y) > 0:
Z = isin(X,Y)
return X[np.invert(Z)]
else:
return X | Elements of a numpy array that do not appear in another.
Fast routine for determining which elements in numpy array `X`
do not appear in numpy array `Y`.
**Parameters**
**X** : numpy array
Numpy array to compare to numpy array `Y`.
Return subset of `X` corresponding to elements not in `Y`.
**Y** : numpy array
Numpy array to which numpy array `X` is compared.
Return subset of `X` corresponding to elements not in `Y`.
**Returns**
**Z** : numpy array
Subset of `X` corresponding to elements not in `Y`.
**See Also:**
:func:`tabular.fast.recarraydifference`, :func:`tabular.fast.isin` |
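A quick example (assuming arraydifference and the tabular `isin` helper it relies on are importable, with isin behaving like np.isin):

import numpy as np

X = np.array([1, 2, 3, 4, 5])
Y = np.array([2, 4])
print(arraydifference(X, Y))   # [1 3 5]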
18,163 | def _resolve_group_location(self, group: str) -> str:
if os.path.isabs(group):
possible_paths = [group]
else:
possible_paths = []
for repository in self.setting_repositories:
possible_paths.append(os.path.join(repository, group))
for default_setting_extension in self.default_setting_extensions:
number_of_paths = len(possible_paths)
for i in range(number_of_paths):
path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension)
possible_paths.append(path_with_extension)
for path in possible_paths:
if os.path.exists(path):
return path
raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group) | Resolves the location of a setting file based on the given identifier.
:param group: the identifier for the group's settings file (~its location)
:return: the absolute path of the settings location |
18,164 | def put(self, local_path, remote_path=None):
if remote_path is None:
remote_path = os.path.basename(local_path)
ftp = self.ssh.open_sftp()
if os.path.isdir(local_path):
self.__put_dir(ftp, local_path, remote_path)
else:
ftp.put(local_path, remote_path)
ftp.close() | Copy a file (or directory recursively) to a location on the remote server
:param local_path: Local path to copy to; can be file or directory
:param remote_path: Remote path to copy to (default: None - Copies file or directory to
home directory on the remote server) |
18,165 | def validateSamOptions(options, group=False):
if options.per_gene:
if options.gene_tag and options.per_contig:
raise ValueError("need to use either --per-contig "
"OR --gene-tag, please do not provide both")
if not options.per_contig and not options.gene_tag:
raise ValueError("for per-gene applications, must supply "
"--per-contig or --gene-tag")
if options.per_contig and not options.per_gene:
raise ValueError("need to use --per-gene with --per-contig")
if options.gene_tag and not options.per_gene:
raise ValueError("need to use --per-gene with --gene_tag")
if options.gene_transcript_map and not options.per_contig:
raise ValueError("need to use --per-contig and --per-gene"
"with --gene-transcript-map")
if options.get_umi_method == "tag":
if options.umi_tag is None:
raise ValueError("Need to supply the --umi-tag option")
if options.per_cell and options.cell_tag is None:
raise ValueError("Need to supply the --cell-tag option")
if options.assigned_tag is None:
options.assigned_tag = options.gene_tag
if options.skip_regex:
try:
re.compile(options.skip_regex)
except re.error:
raise ValueError("skip-regex is not a "
"valid regex" % options.skip_regex)
if not group:
if options.unmapped_reads == "output":
raise ValueError("Cannot use --unmapped-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.chimeric_pairs == "output":
raise ValueError("Cannot use --chimeric-pairs=output. If you want "
"to retain chimeric read pairs without "
"deduplicating them, use the group command")
if options.unpaired_reads == "output":
raise ValueError("Cannot use --unpaired-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.paired:
if options.chimeric_pairs == "use":
warn("Chimeric read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --chimeric-pairs==discard to discard these reads "
"or --chimeric-pairs==output (group command only) to "
"output them without grouping")
if options.unpaired_reads == "use":
warn("Unpaired read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --unpared-reads==discard to discard these reads "
"or --unpared-reads==output (group command only) to "
"output them without grouping")
if options.unmapped_reads == "use":
warn("Unmapped read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1. "
"This may also increase the run time and memory usage. "
"Consider --unmapped_reads==discard to discard these reads "
"or --unmapped_reads==output (group command only) to "
"output them without grouping")
command = " ".join(sys.argv)
info("command: %s" % command)
if "--umi-tag" in command or "--cell-tag" in command:
if options.get_umi_method != "tag":
raise ValueError("--umi-tag and/or --cell-tag options provided. "
"Need to set --extract-umi-method=tag")
if options.unmapped_reads == "use":
if not options.paired:
raise ValueError("--unmapped-reads=use is only compatible with "
"paired end reads (--paired)")
if "--chimeric-pairs" in command:
info("command: %s" % command)
if not options.paired:
raise ValueError("--chimeric-pairs is only compatible "
"with paired end reads (--paired)")
if "--unpaired-reads" in command:
if not options.paired:
raise ValueError("--unpaired-reads is only compatible "
"with paired end reads (--paired)")
if options.output_unmapped:
warn("--output-unmapped will be removed in the near future. "
"Use --unmapped-reads=output instead")
if "--unmapped_reads" in command:
raise ValueError("Do not use --output-unmapped in combination with"
"--unmapped-reads. Just use --unmapped-reads")
options.unmapped_reads = "output" | Check the validity of the option combinations for sam/bam input |
18,166 | def launch(exec_, args):
if not exec_:
raise RuntimeError(
.format(DEVELOPER_NAME)
)
if args.debug:
return
watched = WatchFile()
cmd = [exec_] if args.file is None else [exec_, args.file]
cmd.extend([, , watched.path])
if args.debug:
cmd.append()
maya = subprocess.Popen(cmd)
while True:
time.sleep(1)
maya.poll()
watched.check()
if maya.returncode is not None:
if not maya.returncode == 0:
maya = subprocess.Popen(cmd)
else:
watched.stop()
break | Launches application. |
18,167 | def search(self):
try:
filters = json.loads(self.query)
except ValueError:
return False
result = self.model_query
if in filters.keys():
result = self.parse_filter(filters[])
if in filters.keys():
result = result.order_by(*self.sort(filters[]))
return result | This is the most important method |
18,168 | def get_nan_locs(self, **kwargs):
if np.issubdtype(self.X.dtype, np.string_) or np.issubdtype(self.X.dtype, np.unicode_):
mask = np.where( self.X == )
nan_matrix = np.zeros(self.X.shape)
nan_matrix[mask] = np.nan
else:
nan_matrix = self.X.astype(float)
if self.y is None:
return np.argwhere(np.isnan(nan_matrix))
else:
nan_locs = []
for target_value in np.unique(self.y):
indices = np.argwhere(self.y == target_value)
target_matrix = nan_matrix[indices.flatten()]
nan_target_locs = np.argwhere(np.isnan(target_matrix))
nan_locs.append((target_value, nan_target_locs))
return nan_locs | Gets the locations of nans in feature data and returns
the coordinates in the matrix |
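Illustrative note: a small numpy sketch of the nan-location logic described above, on a made-up feature matrix rather than data from the original project.

import numpy as np

X = np.array([[1.0, np.nan, 3.0],
              [np.nan, 5.0, 6.0]])

# np.argwhere(np.isnan(...)) yields one (row, column) pair per missing entry.
nan_locs = np.argwhere(np.isnan(X))
print(nan_locs)  # [[0 1]
                 #  [1 0]]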
18,169 | def add_point_region(self, y: float, x: float) -> Graphic:
graphic = Graphics.PointGraphic()
graphic.position = Geometry.FloatPoint(y, x)
self.__display_item.add_graphic(graphic)
return Graphic(graphic) | Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes |
18,170 | def _unpack(struct, bc, offset=0):
return struct.unpack_from(bc, offset), offset + struct.size | returns the unpacked data tuple, and the next offset past the
unpacked data |
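Illustrative note: a self-contained sketch of the unpack-and-advance idiom the helper above implements, using the standard struct module and invented sample bytes.

import struct

header = struct.Struct("<HH")            # two little-endian unsigned shorts
payload = struct.pack("<HHI", 1, 2, 3)   # some packed bytes to read from

def unpack_at(fmt_struct, buf, offset=0):
    # Return the unpacked tuple plus the offset just past the consumed bytes.
    return fmt_struct.unpack_from(buf, offset), offset + fmt_struct.size

values, next_offset = unpack_at(header, payload)
print(values, next_offset)  # (1, 2) 4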
18,171 | def query(self, model_cls):
self._filters_cmd = list()
self.query_filters = list()
self._order_by_cmd = None
self._offset = 0
self._limit = 0
self.query_class = model_cls._name
return self | SQLAlchemy query like method |
18,172 | def _mark_void(self):
self.invoice.status = commerce.Invoice.STATUS_VOID
self.invoice.save() | Marks the invoice as void, and updates the attached cart if
necessary. |
18,173 | def open(self, file_path):
from simplesqlite import SimpleSQLite
if self.is_opened():
if self.stream.database_path == abspath(file_path):
self._logger.logger.debug(
"database already opened: {}".format(self.stream.database_path)
)
return
self.close()
self._stream = SimpleSQLite(file_path, "w") | Open a SQLite database file.
:param str file_path: SQLite database file path to open. |
18,174 | def is_matching_mime_type(self, mime_type):
if len(self.include_mime_types) == 0:
return True
if mime_type is None:
return False
mime_type = mime_type.lower()
return any(mime_type.startswith(mt) for mt in self.include_mime_types) | This implements the MIME-type matching logic for deciding whether
to run `make_clean_html` |
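Illustrative note: a standalone sketch of the prefix-based MIME matching described above; the function name and sample types are invented for the example.

def mime_matches(mime_type, include_prefixes):
    # An empty whitelist accepts everything; otherwise match prefixes case-insensitively.
    if not include_prefixes:
        return True
    if mime_type is None:
        return False
    mime_type = mime_type.lower()
    return any(mime_type.startswith(prefix) for prefix in include_prefixes)

print(mime_matches("text/HTML; charset=utf-8", ["text/html"]))  # True
print(mime_matches("application/pdf", ["text/html"]))           # False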
18,175 | def bits(self, count):
if count < 0:
raise ValueError
if count > self._bits:
n_bytes = (count - self._bits + 7) // 8
data = self._fileobj.read(n_bytes)
if len(data) != n_bytes:
raise BitReaderError("not enough data")
for b in bytearray(data):
self._buffer = (self._buffer << 8) | b
self._bits += n_bytes * 8
self._bits -= count
value = self._buffer >> self._bits
self._buffer &= (1 << self._bits) - 1
assert self._bits < 8
return value | Reads `count` bits and returns an uint, MSB read first.
May raise BitReaderError if not enough data could be read or
IOError by the underlying file object. |
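Illustrative note: a minimal MSB-first bit reader showing the same buffering idea as the method above; it is a simplified stand-in, not the original class.

import io

class SimpleBitReader(object):
    def __init__(self, fileobj):
        self._fileobj = fileobj
        self._buffer = 0
        self._bits = 0

    def bits(self, count):
        # Refill the buffer one byte at a time until enough bits are available.
        while self._bits < count:
            byte = self._fileobj.read(1)
            if not byte:
                raise EOFError("not enough data")
            self._buffer = (self._buffer << 8) | byte[0]
            self._bits += 8
        self._bits -= count
        value = self._buffer >> self._bits
        self._buffer &= (1 << self._bits) - 1
        return value

reader = SimpleBitReader(io.BytesIO(b"\xAB"))
print(reader.bits(4), reader.bits(4))  # 10 11  (0xA, 0xB)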
18,176 | def get_gid_list(user, include_default=True):
if HAS_GRP is False or HAS_PWD is False:
return []
gid_list = list(
six.itervalues(
get_group_dict(user, include_default=include_default)
)
)
return sorted(set(gid_list)) | Returns a list of all of the system group IDs of which the user
is a member. |
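Illustrative note: a Unix-only sketch of collecting a user's group IDs with the standard grp and pwd modules, roughly mirroring the helper above; the username is a placeholder.

import grp
import pwd

def group_ids_for(username):
    # Primary group from the passwd entry, plus every group listing the user as a member.
    gids = {g.gr_gid for g in grp.getgrall() if username in g.gr_mem}
    gids.add(pwd.getpwnam(username).pw_gid)
    return sorted(gids)

# Example (on a Unix system): print(group_ids_for("root"))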
18,177 | def __get_eval_info(self):
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
tmp_out_len = ctypes.c_int(0)
string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.byref(tmp_out_len),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
self.__name_inner_eval = [string_buffers[i].value.decode('utf-8') for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = [name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval] | Get inner evaluation count and names. |
18,178 | def MaxPooling(
inputs,
pool_size,
strides=None,
padding=,
data_format=):
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name=) | Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size. |
18,179 | def __get_neighbors(self, node_index):
return [ index for index in range(len(self.__data_pointer[node_index])) if self.__data_pointer[node_index][index] != 0 ]; | !
@brief Returns indexes of neighbors of the specified node.
@param[in] node_index (uint): Index of the node whose neighbors should be returned.
@return (list) Neighbors of the specified node. |
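Illustrative note: a tiny standalone version of the neighbor lookup described above, using a hypothetical adjacency matrix where a non-zero entry means the two nodes are connected.

adjacency = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 0],
    [1, 0, 0, 0],
]

def neighbors(node_index):
    # Indexes of every column with a non-zero weight in the node's row.
    return [i for i, weight in enumerate(adjacency[node_index]) if weight != 0]

print(neighbors(0))  # [1, 3]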
18,180 | def gen_hyper_keys(minion_id,
country=,
state=,
locality=,
organization=,
expiration_days=):
key_dir = os.path.join(
__opts__[],
)
if not os.path.isdir(key_dir):
os.makedirs(key_dir)
cakey = os.path.join(key_dir, )
cacert = os.path.join(key_dir, )
cainfo = os.path.join(key_dir, )
if not os.path.isfile(cainfo):
with salt.utils.files.fopen(cainfo, ) as fp_:
fp_.write()
if not os.path.isfile(cakey):
subprocess.call(
.format(cakey),
shell=True)
if not os.path.isfile(cacert):
cmd = (
).format(cakey, cainfo, cacert)
subprocess.call(cmd, shell=True)
sub_dir = os.path.join(key_dir, minion_id)
if not os.path.isdir(sub_dir):
os.makedirs(sub_dir)
priv = os.path.join(sub_dir, )
cert = os.path.join(sub_dir, )
srvinfo = os.path.join(sub_dir, )
cpriv = os.path.join(sub_dir, )
ccert = os.path.join(sub_dir, )
clientinfo = os.path.join(sub_dir, )
if not os.path.isfile(srvinfo):
with salt.utils.files.fopen(srvinfo, ) as fp_:
infodat = salt.utils.stringutils.to_str(
.format(
__grains__[], expiration_days
)
)
fp_.write(infodat)
if not os.path.isfile(priv):
subprocess.call(
.format(priv),
shell=True)
if not os.path.isfile(cert):
cmd = (
).format(priv, cacert, cakey, srvinfo, cert)
subprocess.call(cmd, shell=True)
if not os.path.isfile(clientinfo):
with salt.utils.files.fopen(clientinfo, ) as fp_:
infodat = salt.utils.stringutils.to_str(
.format(
country,
state,
locality,
organization,
__grains__[]
)
)
fp_.write(infodat)
if not os.path.isfile(cpriv):
subprocess.call(
.format(cpriv),
shell=True)
if not os.path.isfile(ccert):
cmd = (
).format(cpriv, cacert, cakey, clientinfo, ccert)
subprocess.call(cmd, shell=True) | Generate the keys to be used by libvirt hypervisors, this routine gens
the keys and applies them to the pillar for the hypervisor minions |
18,181 | def list_(properties=, zpool=None, parsable=True):
ret = OrderedDict()
if not isinstance(properties, list):
properties = properties.split()
while in properties:
properties.remove()
properties.insert(0, )
return ret | .. versionadded:: 2015.5.0
Return information about (all) storage pools
zpool : string
optional name of storage pool
properties : string
comma-separated list of properties to list
parsable : boolean
display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
.. note::
The ``name`` property will always be included, while the ``frag``
property will get removed if not available
zpool : string
optional zpool
.. note::
Multiple storage pools can be provided as a space-separated list
CLI Example:
.. code-block:: bash
salt '*' zpool.list
salt '*' zpool.list zpool=tank
salt '*' zpool.list 'size,free'
salt '*' zpool.list 'size,free' tank |
18,182 | def remove(self, dic):
for kw in dic:
removePair = Pair(kw, dic[kw])
self._remove([removePair]) | remove the pair by passing an identical dict
Args:
dic (dict): key and value |
18,183 | def get_datastores(service_instance, reference, datastore_names=None,
backing_disk_ids=None, get_all_datastores=False):
obj_name = get_managed_object_name(reference)
if get_all_datastores:
log.trace(%s\, obj_name)
else:
log.trace(%s\
,
obj_name, datastore_names, backing_disk_ids)
if backing_disk_ids and not isinstance(reference, vim.HostSystem):
raise salt.exceptions.ArgumentValueError(
{0}\
.format(reference.__class__.__name__))
if (not get_all_datastores) and backing_disk_ids:
log.trace(,
backing_disk_ids)
storage_system = get_storage_system(service_instance, reference,
obj_name)
props = salt.utils.vmware.get_properties_of_managed_object(
storage_system, [])
mount_infos = props.get(, [])
disk_datastores = []
for vol in [i.volume for i in mount_infos]:
if not [d for d in vol.extent if d.diskName in backing_disk_ids]:
continue
log.trace(%s\%s\,
vol.name, [e.diskName for e in vol.extent])
disk_datastores.append(vol.name)
log.trace(, disk_datastores)
if datastore_names:
datastore_names.extend(disk_datastores)
else:
datastore_names = disk_datastores
if (not get_all_datastores) and (not datastore_names):
log.trace(
%s\, backing_disk_ids)
return []
log.trace(, datastore_names)
if isinstance(reference, vim.HostSystem):
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path=,
selectSet=[
vmodl.query.PropertyCollector.TraversalSpec(
path=,
skip=False,
type=vim.Datacenter)],
skip=False,
type=vim.Folder)
else:
raise salt.exceptions.ArgumentValueError(
{0}\
.format(reference.__class__.__name__))
items = get_mors_with_properties(service_instance,
object_type=vim.Datastore,
property_list=[],
container_ref=reference,
traversal_spec=traversal_spec)
log.trace(, len(items))
items = [i for i in items if get_all_datastores or i[] in
datastore_names]
log.trace(, [i[] for i in items])
return [i[] for i in items] | Returns a list of vim.Datastore objects representing the datastores visible
from a VMware object, filtered by their names, or the backing disk
canonical name or scsi_addresses
service_instance
The Service Instance Object from which to obtain datastores.
reference
The VMware object from which the datastores are visible.
datastore_names
The list of datastore names to be retrieved. Default value is None.
backing_disk_ids
The list of canonical names of the disks backing the datastores
to be retrieved. Only supported if reference is a vim.HostSystem.
Default value is None
get_all_datastores
Specifies whether to retrieve all disks in the host.
Default value is False. |
18,184 | def index_all(self):
self.logger.debug(,
self.record_path)
with self.db.connection():
for json_path in sorted(self.find_record_files()):
self.index_record(json_path) | Index all records under :attr:`record_path`. |
18,185 | def _identify_heterogeneity_blocks_seg(in_file, seg_file, params, work_dir, somatic_info):
def _segment_by_cns(target_chrom, freqs, coords):
with open(seg_file) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
next(reader)
for cur_chrom, start, end in (xs[:3] for xs in reader):
if cur_chrom == target_chrom:
block_freqs = []
for i, (freq, coord) in enumerate(zip(freqs, coords)):
if coord >= int(start) and coord < int(end):
block_freqs.append(freq)
elif coord >= int(end):
break
coords = coords[max(0, i - 1):]
freqs = freqs[max(0, i - 1):]
if len(block_freqs) > params["hetblock"]["min_alleles"]:
yield start, end
return _identify_heterogeneity_blocks_shared(in_file, _segment_by_cns, params, work_dir, somatic_info) | Identify heterogeneity blocks corresponding to segmentation from CNV input file. |
18,186 | def _library_check(self):
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output("ldd", self._path)
except (FileNotFoundError, subprocess.SubprocessError) as e:
log.warn("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
return
p = re.compile("([\.\w]+)\s=>\s+not found")
missing_libs = p.findall(output)
if missing_libs:
raise IOUError("The following shared library dependencies cannot be found for IOU image {}: {}".format(self._path,
", ".join(missing_libs))) | Checks for missing shared library dependencies in the IOU image. |
18,187 | def get_content_version(cls, abspath: str) -> str:
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest() | Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1 |
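Illustrative note: a self-contained sketch of the content-hash versioning idea above, accepting either a bytes object or an iterable of chunks; the names are invented for the example.

import hashlib

def content_version(data):
    hasher = hashlib.md5()
    if isinstance(data, bytes):
        hasher.update(data)
    else:
        for chunk in data:        # e.g. a generator yielding file chunks
            hasher.update(chunk)
    return hasher.hexdigest()

print(content_version(b"hello world"))
print(content_version([b"hello ", b"world"]))  # same digest, fed in chunks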
18,188 | def manifest(txt, dname):
_, files = _expand_source(txt, dname, HTML)
return files | Extracts file manifest for a body of text with the given directory. |
18,189 | def plot_one_month(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
plt.close("all")
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
ax.plot(x, y)
days = DayLocator(range(365))
daysFmt = DateFormatter("%Y-%m-%d")
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(daysFmt)
ax.autoscale_view()
ax.grid()
plt.setp( ax.xaxis.get_majorticklabels(), rotation=90 )
if xlabel:
plt.xlabel(xlabel)
else:
plt.xlabel("Time")
if ylabel:
plt.ylabel(ylabel)
else:
plt.ylabel("Value")
if title:
plt.title(title)
else:
plt.title("%s to %s" % (str(x[0]), str(x[-1]) ) )
if ylim:
plt.ylim(ylim)
else:
plt.ylim([min(y) - (max(y) - min(y) ) * 0.05,
max(y) + (max(y) - min(y) ) * 0.05])
return plt, ax | 时间跨度为一月。
major tick = every days |
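Illustrative note: a minimal, headless matplotlib sketch of the per-day major ticks used above, on synthetic dates rather than the original data.

import datetime
import matplotlib
matplotlib.use("Agg")  # non-interactive backend so the sketch runs anywhere
import matplotlib.pyplot as plt
from matplotlib.dates import DayLocator, DateFormatter

x = [datetime.date(2024, 1, 1) + datetime.timedelta(days=i) for i in range(30)]
y = list(range(30))

fig, ax = plt.subplots()
ax.plot(x, y)
ax.xaxis.set_major_locator(DayLocator())                 # one major tick per day
ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d"))  # label ticks as dates
fig.autofmt_xdate(rotation=90)
fig.savefig("one_month.png")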
18,190 | def user_save(self):
if not self.cur_user:
return
username = self.user_username_le.text()
first = self.user_first_le.text()
last = self.user_last_le.text()
email = self.user_email_le.text()
self.cur_user.username = username
self.cur_user.first_name = first
self.cur_user.last_name = last
self.cur_user.email = email
self.cur_user.save() | Save the current user
:returns: None
:rtype: None
:raises: None |
18,191 | def comments(context, obj):
content_type = ContentType.objects.get_for_model(obj.__class__)
comment_list = LogEntry.objects.filter(
content_type=content_type,
object_id=obj.pk,
action_flag=COMMENT
)
return {
: obj,
: comment_list,
: context[],
} | Render comments for obj. |
18,192 | def rev_after(self, rev: int) -> int:
self.seek(rev)
if self._future:
return self._future[-1][0] | Return the earliest future rev on which the value will change. |
18,193 | def _distarray_no_missing(self, xc, xd):
from scipy.spatial.distance import pdist, squareform
def pre_normalize(x):
idx = 0
for i in sorted(self.attr.keys()):
if self.attr[i][0] == :
continue
cmin = self.attr[i][2]
diff = self.attr[i][3]
x[:, idx] -= cmin
x[:, idx] /= diff
idx += 1
return x
if self.data_type == :
return squareform(pdist(self._X, metric=))
elif self.data_type == :
d_dist = squareform(pdist(xd, metric=))
c_dist = squareform(pdist(pre_normalize(xc), metric=))
return np.add(d_dist, c_dist) / self._num_attributes
else:
return squareform(pdist(pre_normalize(xc), metric=)) | Distance array calculation for data with no missing values. The pdist() function outputs a condensed distance array, and squareform() converts this vector-form
distance vector to a square-form, redundant distance matrix.
*This could be a target for saving memory in the future, by not needing to expand to the redundant square-form matrix. |
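Illustrative note: a short scipy example of the condensed-versus-square distance forms mentioned above, on a made-up matrix.

import numpy as np
from scipy.spatial.distance import pdist, squareform

X = np.array([[0.0, 0.0],
              [3.0, 4.0],
              [6.0, 8.0]])

condensed = pdist(X, metric="euclidean")   # upper-triangle distances only
print(condensed)                           # [ 5. 10.  5.]
print(squareform(condensed))               # full symmetric 3x3 matrix, zero diagonal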
18,194 | def render_pyquery(self, **kwargs):
from pyquery import PyQuery as pq
return pq(self.render(**kwargs), parser=) | Render the graph, and return a pyquery wrapped tree |
18,195 | def _trim_tree(state):
for n in list(state.tree.leaf_node_gen):
if n.type_str == TYPE_NODE_TAG:
n.parent.child_list.remove(n)
return _trim_tree(state) | Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes. |
18,196 | def init(opts):
proxy_dict = opts.get(, {})
opts[] = proxy_dict.get(, False)
netmiko_connection_args = proxy_dict.copy()
netmiko_connection_args.pop(, None)
netmiko_device[] = netmiko_connection_args.pop(,
opts.get(, True))
try:
connection = ConnectHandler(**netmiko_connection_args)
netmiko_device[] = connection
netmiko_device[] = True
netmiko_device[] = netmiko_connection_args
netmiko_device[] = True
if not netmiko_device[]:
netmiko_device[].disconnect()
except NetMikoTimeoutException as t_err:
log.error(, exc_info=True)
except NetMikoAuthenticationException as au_err:
log.error(, exc_info=True)
return True | Open the connection to the network device
managed through netmiko. |
18,197 | def _make_reversed_wildcards(self, old_length=-1):
if len(self._reversed_wildcards) > 0:
start = old_length
else:
start = -1
for wildcards, func in self._wildcard_functions.items():
for irun in range(start, len(self)):
translated_name = func(irun)
if not translated_name in self._reversed_wildcards:
self._reversed_wildcards[translated_name] = ([], wildcards)
self._reversed_wildcards[translated_name][0].append(irun) | Creates a full mapping from all wildcard translations to the corresponding wildcards |
18,198 | def _handle_exception(self, row, exception):
self._log(.format(self._source_reader.row_number))
self._log(row)
self._log(str(exception))
self._log(traceback.format_exc()) | Logs an exception occurred during transformation of a row.
:param list|dict|() row: The source row.
:param Exception exception: The exception. |
18,199 | def make_cutter(self):
return cadquery.Workplane() \
.circle(self.access_diameter / 2) \
.extrude(self.access_height) | Create solid to subtract from material to make way for the fastener's
head (just the head) |