Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars)
---|---|---|
20,700 | def _ParseFileEntry(self, knowledge_base, file_entry):
file_object = file_entry.GetFileObject()
try:
self._ParseFileData(knowledge_base, file_object)
finally:
file_object.close() | Parses a file entry for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry that contains the artifact
value data.
Raises:
PreProcessFail: if the preprocessing fails. |
20,701 | def isnumber(self, string, *args):
try:
n, u = utility.analyze_number(string)
except SyntaxError:
return False
return True | Is number
args:
string (str): match
returns:
bool |
20,702 | def begin_run_group(project):
from benchbuild.utils.db import create_run_group
from datetime import datetime
group, session = create_run_group(project)
group.begin = datetime.now()
group.status =
session.commit()
return group, session | Begin a run_group in the database.
A run_group groups a set of runs for a given project. This models a series
of runs that form a complete binary runtime test.
Args:
project: The project we begin a new run_group for.
Returns:
``(group, session)`` where group is the created group in the
database and session is the database session this group lives in. |
20,703 | def get_choices(timezones, grouped=False):
TZOffset = namedtuple('TZOffset', ['value', 'offset_string'])
choices_dict = defaultdict(list)
for tz in iter(timezones):
now = datetime.now(pytz.timezone(tz))
offset = now.strftime("%z")
timezone_offset_string = .format(
**TIMEZONE_OFFSET_REGEX.match(offset).groupdict()
)
if not grouped:
display_string = .format(
timezone_offset_string=timezone_offset_string,
tz=tz,
)
else:
display_string = tz
choices_dict[
TZOffset(value=int(offset), offset_string=timezone_offset_string)
].append(
(tz, display_string)
)
choices = []
for tz_offset in sorted(choices_dict, key=attrgetter('value')):
if not grouped:
choices.extend(
tuple(choices_dict[tz_offset])
)
else:
choices.append(
(
tz_offset.offset_string,
tuple(choices_dict[tz_offset])
)
)
return tuple(choices) | Retrieves timezone choices from any iterable (normally pytz). |
20,704 | def handle_task(self, uuid_task, worker=None):
uuid, task = uuid_task
if task.worker and task.worker.hostname:
worker = self.handle_worker(
(task.worker.hostname, task.worker),
)
defaults = {
'name': task.name,
'args': task.args,
'kwargs': task.kwargs,
'eta': correct_awareness(maybe_iso8601(task.eta)),
'expires': correct_awareness(maybe_iso8601(task.expires)),
'state': task.state,
'tstamp': fromtimestamp(task.timestamp),
'result': task.result or task.exception,
'traceback': task.traceback,
'runtime': task.runtime,
'worker': worker
}
[defaults.pop(attr, None) for attr in NOT_SAVED_ATTRIBUTES
if defaults[attr] is None]
return self.update_task(task.state,
task_id=uuid, defaults=defaults) | Handle snapshotted event. |
20,705 | def parse(self, text):
if self.format == 'markdown':
try:
import markdown
except ImportError:
raise RuntimeError(u"Looks like markdown is not installed")
if text.startswith(u'\ufeff'):
text = text[1:]
return markdown.markdown(text, extensions=self.md_extensions)
elif self.format == 'restructuredtext':
try:
from landslide.rst import html_body
except ImportError:
raise RuntimeError(u"Looks like docutils are not installed")
html = html_body(text, input_encoding=self.encoding)
for (pattern, replacement, mode) in self.RST_REPLACEMENTS:
html = re.sub(re.compile(pattern, mode), replacement, html, 0)
return html.strip()
elif self.format == 'textile':
try:
import textile
except ImportError:
raise RuntimeError(u"Looks like textile is not installed")
text = text.replace(, )
return textile.textile(text, encoding=self.encoding)
else:
raise NotImplementedError(u"Unsupported format %s, cannot parse"
% self.format) | Parses and renders a text as HTML regarding current format. |
20,706 | def import_rsa_key(pem_data):
if not pem_data.startswith(PREFIX):
pem_data = bytes('{}\n{}\n{}'.format(PREFIX, pem_data, POSTFIX),
'utf-8')
else:
pem_data = bytes(pem_data, 'utf-8')
cert = x509.load_pem_x509_certificate(pem_data, default_backend())
return cert.public_key() | Extract an RSA key from a PEM-encoded X.509 certificate
:param pem_data: RSA key encoded in standard form
:return: rsa.RSAPublicKey instance |
20,707 | def _get_base(role, **conn):
base_fields = frozenset(['Arn', 'AssumeRolePolicyDocument', 'Path', 'RoleId', 'RoleName', 'CreateDate'])
needs_base = False
for field in base_fields:
if field not in role:
needs_base = True
break
if needs_base:
role_name = _get_name_from_structure(role, 'RoleName')
role = CloudAux.go('iam.client.get_role', RoleName=role_name, **conn)
role = role['Role']
role.update(dict(CreateDate=get_iso_string(role['CreateDate'])))
role['_version'] = 3
return role | Determine whether the boto get_role call needs to be made or if we already have all that data
in the role object.
:param role: dict containing (at the very least) role_name and/or arn.
:param conn: dict containing enough information to make a connection to the desired account.
:return: Camelized dict describing role containing all base_fields. |
20,708 | def stop(self):
LOGGER.debug("natsd.Driver.stop")
for requester in self.requester_registry:
requester.stop()
self.requester_registry.clear()
for service in self.services_registry:
if service.is_started:
service.stop()
self.services_registry.clear()
return self | Stop services and requestors and then connection.
:return: self |
20,709 | def get_active_choices(self, language_code=None, site_id=None):
if language_code is None:
language_code = get_language()
lang_dict = self.get_language(language_code, site_id=site_id)
if not lang_dict['hide_untranslated']:
return [language_code] + [lang for lang in lang_dict['fallbacks'] if lang != language_code]
else:
return [language_code] | Find out which translations should be visible in the site.
It returns a list with either a single choice (the current language),
or a list with the current language + fallback language. |
20,710 | def refresh_jwt_token(self, token, override_access_lifespan=None):
moment = pendulum.now()
with InvalidTokenHeader.handle_errors():
data = jwt.decode(
token,
self.encode_key,
algorithms=self.allowed_algorithms,
options={'verify_exp': False},
)
self._validate_jwt_data(data, access_type=AccessType.refresh)
user = self.user_class.identify(data['id'])
self._check_user(user)
if override_access_lifespan is None:
access_lifespan = self.access_lifespan
else:
access_lifespan = override_access_lifespan
refresh_expiration = data['rf_exp']
access_expiration = min(
(moment + access_lifespan).int_timestamp,
refresh_expiration,
)
custom_claims = {
k: v for (k, v) in data.items() if k not in RESERVED_CLAIMS
}
payload_parts = dict(
iat=moment.int_timestamp,
exp=access_expiration,
rf_exp=refresh_expiration,
jti=data['jti'],
id=data['id'],
rls=','.join(user.rolenames),
**custom_claims
)
return jwt.encode(
payload_parts, self.encode_key, self.encode_algorithm,
).decode() | Creates a new token for a user if and only if the old token's access
permission is expired but its refresh permission is not yet expired.
The new token's refresh expiration moment is the same as the old
token's, but the new token's access expiration is refreshed
:param: token: The existing jwt token that needs to
be replaced with a new, refreshed
token
:param: override_access_lifespan: Override's the instance's access
lifespan to set a custom duration
after which the new token's
accessability will expire. May not
exceed the refresh lifespan |
20,711 | def remove(self):
LOGGER.debug("Endpoint.remove - " + self.id + " - " + self.url)
if self.id is None:
return None
else:
if self.id in EndpointService.local_cache_by_id:
for search_params in EndpointService.local_cache_by_id[self.id]:
if search_params in EndpointService.local_cache_by_params.keys():
EndpointService.local_cache_by_params.pop(search_params)
EndpointService.local_cache_by_id.pop(self.id)
params = SessionService.complete_transactional_req({
: self.id
})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params[] =
args = {: params}
else:
args = {: , : , : params}
response = EndpointService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc != 0:
LOGGER.warning(
+ str(self.id) +
+ str(response.response_content) + + str(response.error_message) +
" (" + str(response.rc) + ")"
)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("Endpoint.remove",
ArianeMappingOverloadError.ERROR_MSG)
return self
else:
if self.parent_node is not None:
self.parent_node.sync()
return None | remove this endpoint from Ariane server
:return: |
20,712 | def sub_array_2d_from_sub_array_1d(self, sub_array_1d):
sub_shape = (self.mask.shape[0] * self.sub_grid_size, self.mask.shape[1] * self.sub_grid_size)
sub_one_to_two = self.mask.masked_sub_grid_index_to_sub_pixel(sub_grid_size=self.sub_grid_size)
return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(
array_1d=sub_array_1d, shape=sub_shape, one_to_two=sub_one_to_two) | Map a 1D sub-array the same dimension as the sub-grid (e.g. including sub-pixels) to its original masked
2D sub array.
Parameters
-----------
sub_array_1d : ndarray
The 1D sub_array which is mapped to its masked 2D sub-array. |
20,713 | def str2url(str):
try:
str = str.encode()
except:
pass
mfrom = "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîï"
to = "AAAAAAECEEEEIIIIDNOOOOOOUUUUYSaaaaaaaceeeeiiii"
mfrom += "ñòóôõöøùúûüýÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģ"
to += "noooooouuuuyyaaaaaaccccccccddddeeeeeeeeeegggggggg"
mfrom += "ĤĥĦħĨĩĪīĬĭĮįİıĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘř"
to += "hhhhiiiiiiiiiijjkkkllllllllllnnnnnnnnnoooooooorrrrrr"
mfrom += "ŚśŜŝŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀƂƃƄƅƇƈƉƊƐƑƒƓƔ"
to += "ssssssssttttttuuuuuuuuuuuuwwyyyzzzzzzfbbbbbccddeffgv"
mfrom += "ƖƗƘƙƚƝƞƟƠƤƦƫƬƭƮƯưƱƲƳƴƵƶǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩ"
to += "likklnnoopettttuuuuyyzzaaiioouuuuuuuuuueaaaaeeggggkk"
mfrom += "ǪǫǬǭǰǴǵǷǸǹǺǻǼǽǾǿȀȁȂȃȄȅȆȇȈȉȊȋȌȍȎȏȐȑȒȓȔȕȖȗȘșȚțȞȟȤȥȦȧȨȩ"
to += "oooojggpnnaaeeooaaaaeeeeiiiioooorrrruuuusstthhzzaaee"
mfrom += "ȪȫȬȭȮȯȰȱȲȳḀḁḂḃḄḅḆḇḈḉḊḋḌḍḎḏḐḑḒḓḔḕḖḗḘḙḚḛḜḝḞḟḠḡḢḣḤḥḦḧḨḩḪḫ"
to += "ooooooooyyaabbbbbbccddddddddddeeeeeeeeeeffgghhhhhhhhhh"
mfrom += "ḬḭḮḯḰḱḲḳḴḵḶḷḸḹḺḻḼḽḾḿṀṁṂṃṄṅṆṇṈṉṊṋṌṍṎṏṐṑṒṓṔṕṖṗṘṙṚṛṜṝṞṟ"
to += "iiiikkkkkkllllllllmmmmmmnnnnnnnnoooooooopppprrrrrrrr"
mfrom += "ṠṡṢṣṤṥṦṧṨṩṪṫṬṭṮṯṰṱṲṳṴṵṶṷṸṹṺṻṼṽṾṿẀẁẂẃẄẅẆẇẈẉẊẋẌẍẎẏẐẑẒẓẔẕ"
to += "ssssssssssttttttttuuuuuuuuuuvvvvwwwwwwwwwwxxxxxyzzzzzz"
mfrom += "ẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀềỂểỄễỆệỈỉỊị"
to += "htwyafaaaaaaaaaaaaaaaaaaaaaaaaeeeeeeeeeeeeeeeeiiii"
mfrom += "ỌọỎỏỐốỒồỔổỖỗỘộỚớỜờỞởỠỡỢợỤụỦủỨứỪừỬửỮữỰựỲỳỴỵỶỷỸỹ"
to += "oooooooooooooooooooooooouuuuuuuuuuuuuuyyyyyyyy"
for i in zip(mfrom, to):
str = str.replace(*i)
return str | Takes a UTF-8 string and replaces all characters with the equivalent in 7-bit
ASCII. It returns a plain ASCII string usable in URLs. |
20,714 | def valuefrompostdata(self, postdata):
if self.id in postdata and postdata[self.id] != '':
return float(postdata[self.id])
else:
return None | This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set() |
20,715 | def _proxy(self):
if self._context is None:
self._context = SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListItemContext for this SyncListItemInstance
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext |
20,716 | def toVName(name, stripNum=0, upper=False):
if upper:
name = name.upper()
if stripNum != 0:
name = name[:-stripNum]
return name.replace('_', '-') | Turn a Python name into an iCalendar style name,
optionally uppercase and with characters stripped off. |
20,717 | def delete(self, subnet_id):
subnet = self.client.describe_subnets(
SubnetIds=[subnet_id]).get('Subnets')[0]
vpc_id = subnet.get('VpcId')
self.client.delete_subnet(SubnetId=subnet_id)
return self.client.delete_vpc(VpcId=vpc_id) | This is a bad delete function,
because one VPC can have more than one subnet.
It is OK if the user only uses CAL to manage cloud resources.
We will update this ASAP. |
20,718 | def getSkeletalSummaryData(self, action):
fn = self.function_table.getSkeletalSummaryData
pSkeletalSummaryData = VRSkeletalSummaryData_t()
result = fn(action, byref(pSkeletalSummaryData))
return result, pSkeletalSummaryData | Reads summary information about the current pose of the skeleton associated with the given action. |
20,719 | def _get_namespace2go2term(go2terms):
namespace2go2term = cx.defaultdict(dict)
for goid, goterm in go2terms.items():
namespace2go2term[goterm.namespace][goid] = goterm
return namespace2go2term | Group GO IDs by namespace. |
20,720 | def agreement_weighted(ci, wts):
ci = np.array(ci)
m, n = ci.shape
wts = np.array(wts) / np.sum(wts)
D = np.zeros((n, n))
for i in range(m):
d = dummyvar(ci[i, :].reshape(1, n))
D += np.dot(d, d.T) * wts[i]
return D | D = AGREEMENT_WEIGHTED(CI,WTS) is identical to AGREEMENT, with the
exception that each partitions contribution is weighted according to
the corresponding scalar value stored in the vector WTS. As an example,
suppose CI contained partitions obtained using some heuristic for
maximizing modularity. A possible choice for WTS might be the Q metric
(Newman's modularity score). Such a choice would add more weight to
higher modularity partitions.
NOTE: Unlike AGREEMENT, this script does not have the input argument
BUFFSZ.
Parameters
----------
ci : MxN np.ndarray
set of M (possibly degenerate) partitions of N nodes
wts : Mx1 np.ndarray
relative weight of each partition
Returns
-------
D : NxN np.ndarray
weighted agreement matrix |
20,721 | def birth(self):
splineReal = scipy.interpolate.splrep(self.x, self.y.real)
self.y_int.real = scipy.interpolate.splev(self.x_int,splineReal)
splineImag = scipy.interpolate.splrep(self.x, self.y.imag)
self.y_int.imag = scipy.interpolate.splev(self.x_int,splineImag) | Create the individual (compute the spline curve) |
20,722 | def create(cls, interface_id, logical_interface_ref, **kw):
data = {: True,
: logical_interface_ref,
: str(interface_id)}
if 'reset_interface_nicid' in kw:
data.update(reset_interface_nicid=kw.get('reset_interface_nicid'))
return cls(data) | :param int interface_id: the interface id
:param str logical_ref: logical interface reference, must be unique from
inline intfs
:rtype: dict |
20,723 | def _validate_data(dataset, target, features=None, validation_set=):
_raise_error_if_not_sframe(dataset, "training dataset")
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError(
"Invalid feature %s: Feature names must be of type str" % x)
if isinstance(validation_set, str):
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
validation_set.head().append(dataset.head())
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set | Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception. |
20,724 | def complete(self, text, state):
if self.use_main_ns:
self.namespace = __main__.__dict__
if not text.strip():
if state == 0:
if _readline_available:
readline.insert_text('\t')
readline.redisplay()
return ''
else:
return '\t'
else:
return None
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None | Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'. |
20,725 | def gamma(self, gamma=1.0):
if(isinstance(gamma, (list, tuple, set)) and
len(gamma) != len(self.channels)):
raise ValueError("Number of channels and gamma components differ.")
if isinstance(gamma, (tuple, list)):
gamma_list = list(gamma)
else:
gamma_list = [gamma] * len(self.channels)
for i in range(len(self.channels)):
gamma = float(gamma_list[i])
if gamma < 0:
raise ValueError("Gamma correction must be a positive number.")
logger.debug("Applying gamma %f", gamma)
if gamma == 1.0:
continue
if isinstance(self.channels[i], np.ma.core.MaskedArray):
if ne:
self.channels[i] = np.ma.array(
ne.evaluate("data ** (1.0 / gamma)",
local_dict={"data": self.channels[i].data,
: gamma}),
mask=self.channels[i].mask,
copy=False)
else:
self.channels[i] = np.ma.array(self.channels[i].data **
(1.0 / gamma),
mask=self.channels[i].mask,
copy=False)
else:
self.channels[i] = np.where(self.channels[i] >= 0,
self.channels[i] **
(1.0 / gamma),
self.channels[i]) | Apply gamma correction to the channels of the image. If *gamma* is a
tuple, then it should have as many elements as the channels of the
image, and the gamma correction is applied elementwise. If *gamma* is a
number, the same gamma correction is applied on every channel, if there
are several channels in the image. The behaviour of :func:`gamma` is
undefined outside the normal [0,1] range of the channels. |
20,726 | def normalize_map_between(dictionary, norm_min, norm_max):
if len(dictionary) < 2:
return {}
values = list(dictionary.values())
norm_range = norm_max - norm_min
map_min = min(values)
map_range = max(values) - map_min
range_factor = norm_range / float(map_range)
normalized_map = {}
for key, value in dictionary.items():
normalized_map[key] = norm_min + (value - map_min) * range_factor
return normalized_map | Performs linear normalization of all values in Map between normMin and normMax
:param: map Map to normalize values for
:param: normMin Smallest normalized value
:param: normMax Largest normalized value
:return: A new map with double values within [normMin, normMax] |
20,727 | def _autobox(content, format):
if format == Format.JSON:
return json.loads(content)
elif format == Format.XML:
return etree.fromstring(content)
elif format == Format.CSV:
try:
return csv.reader(io.BytesIO(content))
except TypeError:
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
return unicode_csv_reader(io.StringIO(content))
else:
raise AutoboxError("unknown autobox format %s" % format) | Autobox response content.
:param content: Response content
:type content: str
:param format: Format to return
:type format: `yaxil.Format`
:returns: Autoboxed content
:rtype: dict|xml.etree.ElementTree.Element|csvreader |
20,728 | def check_error(model, path, shapes, output = , verbose = True):
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_copy = {}
for ip in shapes:
input_data[ip] = _np.random.rand(*shapes[ip]).astype('float32')
input_data_copy[ip] = _np.copy(input_data[ip])
dataIter = _mxnet.io.NDArrayIter(input_data_copy)
mx_out = model.predict(dataIter).flatten()
e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
e_out = e_out_dict[output].flatten()
error = _np.linalg.norm(e_out - mx_out)
if verbose:
print("First few predictions from CoreML : %s" % e_out[0:10])
print("First few predictions from MXNet : %s" % e_out[0:10])
print("L2 Error on random data %s" % error)
return error | Check the difference between predictions from MXNet and CoreML. |
20,729 | def _fchown(self, real, fileno, uid, gid):
path = self._fake_path(self._path_from_fd(fileno))
self._chown_common(path, uid, gid) | Run fake fchown code if fileno points to a sub-path of our tree.
The ownership set with this fake fchown can be inspected by looking
at the self.uid/self.gid dictionaries. |
20,730 | def _args_from_dict(ddata: Mapping[str, Any]):
d_true_keys = {}
uns_is_not_key = False
valid_keys = []
for keys in AnnData._H5_ALIASES.values():
valid_keys += keys
valid_keys += [, , , ]
for key in ddata.keys():
if key not in valid_keys:
uns_is_not_key = True
for true_key, keys in AnnData._H5_ALIASES.items():
for key in keys:
if key in ddata:
d_true_keys[true_key] = ddata[key]
if uns_is_not_key: del ddata[key]
break
else:
d_true_keys[true_key] = None
for true_key, keys in AnnData._H5_ALIASES_NAMES.items():
if d_true_keys[true_key] is not None:
for key in keys:
if key in d_true_keys[true_key].dtype.names:
d_true_keys[true_key] = pd.DataFrame.from_records(
d_true_keys[true_key], index=key)
break
d_true_keys[true_key].index = d_true_keys[true_key].index.astype()
for c in d_true_keys[true_key].columns:
if is_string_dtype(d_true_keys[true_key][c]):
d_true_keys[true_key][c] = pd.Index(
d_true_keys[true_key][c]).astype().values
k_to_delete = []
items = (
ddata.items() if uns_is_not_key
else ddata[].items() if in ddata else []
)
for k, v in items:
if k.endswith():
k_stripped = k.replace(, )
if isinstance(v, (str, int)):
v = [v]
for ann in [, ]:
if k_stripped in d_true_keys[ann]:
d_true_keys[ann][k_stripped] = pd.Categorical.from_codes(
codes=d_true_keys[ann][k_stripped].values,
categories=v,
)
k_to_delete.append(k)
for k in k_to_delete:
if uns_is_not_key:
del ddata[k]
else:
del ddata[][k]
X = d_true_keys[]
obs = d_true_keys[]
obsm = d_true_keys[]
var = d_true_keys[]
varm = d_true_keys[]
layers = d_true_keys[]
raw = None
if in ddata:
raw = {}
raw[] = ddata[]
del ddata[]
raw[] = pd.DataFrame.from_records(
ddata[], index=)
del ddata[]
raw[].index = raw[].index.astype()
for c in raw[].columns:
if is_string_dtype(raw[][c]):
raw[][c] = pd.Index(raw[][c]).astype().values
uns = (ddata if uns_is_not_key
else ddata['uns'] if 'uns' in ddata
else {}
)
return X, obs, var, uns, obsm, varm, layers, raw | Allows to construct an instance of AnnData from a dictionary.
Acts as interface for the communication with the hdf5 file.
In particular, from a dict that has been written using
``AnnData._to_dict_fixed_width_arrays``. |
20,731 | def copy(self, new_name=None):
_tmp = copy.deepcopy(self)
if not new_name:
new_name = self.name + '_copy'
if str(type(self)) == "<class >":
_tmp.meta.note(.format(
new=new_name, old=self.meta.name))
_tmp.meta.change_meta(, new_name, log=False)
else:
_tmp.name = new_name
return _tmp | Returns a deep copy of the system
Parameters
-----------
new_name: str, optional
Set a new meta name parameter.
Default: <old_name>_copy |
20,732 | def add(self, key, value, time, compress_level=-1):
return self._set_add_replace('add', key, value, time, compress_level=compress_level) | Add a key/value to server only if it does not exist.
:param key: Key's name
:type key: six.string_types
:param value: A value to be stored on server.
:type value: object
:param time: Time in seconds that your key will expire.
:type time: int
:param compress_level: How much to compress.
0 = no compression, 1 = fastest, 9 = slowest but best,
-1 = default compression level.
:type compress_level: int
:return: True if key is added False if key already exists
:rtype: bool |
20,733 | def convert_to_str(d):
d2 = {}
for k, v in d.items():
k = str(k)
if type(v) in [list, tuple]:
d2[k] = [str(a) for a in v]
elif type(v) is dict:
d2[k] = convert_to_str(v)
else:
d2[k] = str(v)
return d2 | Recursively convert all values in a dictionary to strings
This is required because setup() does not like unicode in
the values it is supplied. |
20,734 | def evaluate_binop_logical(self, operation, left, right, **kwargs):
if operation not in self.binops_logical:
raise ValueError("Invalid logical binary operation '{}'".format(operation))
result = self.binops_logical[operation](left, right)
return bool(result) | Evaluate given logical binary operation with given operands. |
20,735 | def fail_remaining(self):
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() | Mark all unfinished tasks (including currently running ones) as
failed. |
20,736 | def boxcox_trans(p, **kwargs):
if np.abs(p) < 1e-7:
return log_trans()
def transform(x):
return (x**p - 1) / (p * np.sign(x-1))
def inverse(x):
return (np.abs(x) * p + np.sign(x)) ** (1 / p)
kwargs['p'] = p
kwargs['name'] = kwargs.get('name', 'pow_{}'.format(p))
kwargs['transform'] = transform
kwargs['inverse'] = inverse
return trans_new(**kwargs) | Boxcox Transformation
Parameters
----------
p : float
Power parameter, commonly denoted by
lower-case lambda in formulae
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`. |
20,737 | def process_data(key, data_list, result_info_key, identifier_keys):
master_data = []
for item_data in data_list:
data = item_data[key]
if data is None:
current_item_data = {}
else:
if key == :
current_item_data = data[]
elif key == :
top_level_keys = [, ]
current_item_data = flatten_top_level_keys(data, top_level_keys)
elif key == :
current_item_data = data[]
school_list = []
for school_type_key in current_item_data:
schools = current_item_data[school_type_key]
for school in schools:
school[] = school_type_key
school[] = school[]
school[] = school[]
school_list.append(school)
current_item_data = school_list
elif key == :
current_item_data = {}
for month_key in data:
current_item_data[month_key] = data[month_key][]
elif key in [, ]:
current_item_data = flatten_top_level_keys(data, [
,
,
,
])
elif key in [, ]:
top_level_keys = [, ]
current_item_data = flatten_top_level_keys(data, top_level_keys)
else:
current_item_data = data
if isinstance(current_item_data, dict):
_set_identifier_fields(current_item_data, item_data, result_info_key, identifier_keys)
master_data.append(current_item_data)
else:
for item in current_item_data:
_set_identifier_fields(item, item_data, result_info_key, identifier_keys)
master_data.extend(current_item_data)
return master_data | Given a key as the endpoint name, pulls the data for that endpoint out
of the data_list for each address, processes the data into a more
excel-friendly format and returns that data.
Args:
key: the endpoint name of the data to process
data_list: the main data list to take the data from
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
Returns:
A list of dicts (rows) to be written to a worksheet |
20,738 | async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
ns_key = self.build_key(key, namespace=namespace)
await self._add(ns_key, dumps(value), ttl=self._get_ttl(ttl), _conn=_conn)
logger.debug("ADD %s %s (%.4f)s", ns_key, True, time.monotonic() - start)
return True | Stores the value in the given key with ttl if specified. Raises an error if the
key already exists.
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need miliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if key is inserted
:raises:
- ValueError if key already exists
- :class:`asyncio.TimeoutError` if it lasts more than self.timeout |
20,739 | def describe_enum_value(enum_value):
enum_value_descriptor = EnumValueDescriptor()
enum_value_descriptor.name = six.text_type(enum_value.name)
enum_value_descriptor.number = enum_value.number
return enum_value_descriptor | Build descriptor for Enum instance.
Args:
enum_value: Enum value to provide descriptor for.
Returns:
Initialized EnumValueDescriptor instance describing the Enum instance. |
20,740 | def set_description(self, vrf_name, description=None, default=False,
disable=False):
cmds = self.command_builder('description', value=description,
default=default, disable=disable)
return self.configure_vrf(vrf_name, cmds) | Configures the VRF description
Args:
vrf_name (str): The VRF name to configure
description(str): The string to set the vrf description to
default (bool): Configures the vrf description to its default value
disable (bool): Negates the vrf description
Returns:
True if the operation was successful otherwise False |
20,741 | def register_custom_adapter(cls, target_class, adapter):
class_name = target_class.__name__
if adapter.can_serialize():
cls._custom_serializers[class_name] = adapter
if adapter.can_deserialize():
cls._custom_deserializers[class_name] = adapter | :type target_class: type
:type adapter: JsonAdapter|type
:rtype: None |
20,742 | def sample(self, num_rows=1):
self.check_fit()
res = {}
means = np.zeros(self.covariance.shape[0])
size = (num_rows,)
clean_cov = np.nan_to_num(self.covariance)
samples = np.random.multivariate_normal(means, clean_cov, size=size)
for i, (label, distrib) in enumerate(self.distribs.items()):
cdf = stats.norm.cdf(samples[:, i])
res[label] = distrib.percent_point(cdf)
return pd.DataFrame(data=res) | Creates synthetic values statistically similar to the original dataset.
Args:
num_rows: `int` amount of samples to generate.
Returns:
np.ndarray: Sampled data. |
20,743 | def create_panel_of_normals(items, group_id, work_dir):
out_file = os.path.join(work_dir, "%s-%s-pon.hdf5" % (dd.get_sample_name(items[0]), group_id))
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
params = ["-T", "CreateReadCountPanelOfNormals",
"-O", tx_out_file,
"--annotated-intervals", tz.get_in(["regions", "bins", "gcannotated"], items[0])]
for data in items:
params += ["-I", tz.get_in(["depth", "bins", "target"], data)]
_run_with_memory_scaling(params, tx_out_file, items[0], ld_preload=True)
return out_file | Create a panel of normals from one or more background read counts. |
20,744 | def restore(self):
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
self.orig_object) | Restore the saved value for the attribute of the object. |
20,745 | def render(self, size):
middle, corner, side, foreColor, backColor = self.decode(self.code)
size = int(size)
image = Image.new("RGB", (size * 3, size * 3))
draw = ImageDraw.Draw(image)
draw.rectangle((0, 0, image.size[0], image.size[1]), fill=0)
kwds = {
: draw,
: size,
: foreColor,
: backColor}
self.drawPatch((1, 1), middle[2], middle[1], middle[0], **kwds)
kwds[] = side[0]
for i in range(4):
pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
self.drawPatch(pos, side[2] + 1 + i, side[1], **kwds)
kwds[] = corner[0]
for i in range(4):
pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
self.drawPatch(pos, corner[2] + 1 + i, corner[1], **kwds)
return image | render identicon to PIL.Image
@param size identicon patchsize. (image size is 3 * [size])
@return PIL.Image |
20,746 | def average(var, key, N):
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N | average over N points |
20,747 | def get_comments(self, project, work_item_id, top=None, continuation_token=None, include_deleted=None, expand=None, order=None):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if work_item_id is not None:
route_values[] = self._serialize.url(, work_item_id, )
query_parameters = {}
if top is not None:
query_parameters[] = self._serialize.query(, top, )
if continuation_token is not None:
query_parameters[] = self._serialize.query(, continuation_token, )
if include_deleted is not None:
query_parameters[] = self._serialize.query(, include_deleted, )
if expand is not None:
query_parameters[] = self._serialize.query(, expand, )
if order is not None:
query_parameters[] = self._serialize.query(, order, )
response = self._send(http_method=,
location_id=,
version=,
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize(, response) | GetComments.
[Preview API] Returns a list of work item comments, pageable.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item to get comments for.
:param int top: Max number of comments to return.
:param str continuation_token: Used to query for the next page of comments.
:param bool include_deleted: Specify if the deleted comments should be retrieved.
:param str expand: Specifies the additional data retrieval options for work item comments.
:param str order: Order in which the comments should be returned.
:rtype: :class:`<CommentList> <azure.devops.v5_1.work-item-tracking.models.CommentList>` |
20,748 | def get_small_image_url(self, page=1):
template = self.resources.page.get('image')
return template.replace(
"{page}",
str(page)
).replace("{size}", "small") | Returns the URL for the small sized image of a single page.
The page kwarg specifies which page to return. One is the default. |
20,749 | def make_relationship_aggregate(self, relationship):
if not self._session.IS_MANAGING_BACKREFERENCES:
relationship.direction &= ~RELATIONSHIP_DIRECTIONS.REVERSE
return RelationshipAggregate(self, relationship) | Returns a new relationship aggregate for the given relationship.
:param relationship: Instance of
:class:`everest.entities.relationship.DomainRelationship`. |
20,750 | def put_account(self, headers=None, query=None, cdn=False, body=None):
return self.request(
'PUT', '', body or '', headers, query=query, cdn=cdn) | PUTs the account and returns the results. This is usually
done with the extract-archive bulk upload request and has no
other use I know of (but the call is left open in case there
ever is).
:param headers: Additional headers to send with the request.
:param query: Set to a dict of query values to send on the
query string of the request.
:param cdn: If set True, the CDN management interface will be
used.
:param body: Some account PUT requests, like the
extract-archive bulk upload request, take a body.
:returns: A tuple of (status, reason, headers, contents).
:status: is an int for the HTTP status code.
:reason: is the str for the HTTP status (ex: "Ok").
:headers: is a dict with all lowercase keys of the HTTP
headers; if a header has multiple values, it will be a
list.
:contents: is the str for the HTTP body. |
20,751 | def PC_AC1_calc(P, TOP, POP):
try:
result = 0
classes = list(P.keys())
for i in classes:
pi = ((P[i] + TOP[i]) / (2 * POP[i]))
result += pi * (1 - pi)
result = result / (len(classes) - 1)
return result
except Exception:
return "None" | Calculate percent chance agreement for Gwet's AC1.
:param P: condition positive
:type P : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP:dict
:return: percent chance agreement as float |
20,752 | def get_template(cls, message, messenger):
template = message.context.get('tpl', None)
if template:
return template
if cls.template is None:
cls.template = 'sitemessage/messages/%s__%s.%s' % (
cls.get_alias(), messenger.get_alias(), cls.template_ext
)
return cls.template | Get a template path to compile a message.
1. `tpl` field of message context;
2. `template` field of message class;
3. deduced from message, messenger data and `template_ext` message type field
(e.g. `sitemessage/messages/plain__smtp.txt` for `plain` message type).
:param Message message: Message model
:param MessengerBase messenger: a MessengerBase heir
:return: str
:rtype: str |
20,753 | def remove_temp_copy(self):
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None | Removes a temporary copy of the MAGICC version shipped with Pymagicc. |
20,754 | def decipher(self,string):
string = self.remove_punctuation(string)
if len(string)%2 == 1: string = string + 'X'
ret = ''
for c in range(0,len(string.upper()),2):
a,b = self.decipher_pair(string[c],string[c+1])
ret += a + b
return ret | Decipher string using Foursquare cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string. |
20,755 | def linkify(self):
for t_id in self.items:
timeperiod = self.items[t_id]
timeperiod.linkify(self) | Check exclusion for each timeperiod
:return: None |
20,756 | def push(self, kv):
if kv[0] in self:
self.__delitem__(kv[0])
self.__setitem__(kv[0], kv[1]) | Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict. |
20,757 | def run_command_on_marathon_leader(
command,
username=None,
key_path=None,
noisy=True
):
return run_command(shakedown.marathon_leader_ip(), command, username, key_path, noisy) | Run a command on the Marathon leader |
20,758 | def ToByteArray(self):
ms = StreamManager.GetStream()
writer = BinaryWriter(ms)
self.Serialize(writer)
retval = ms.ToArray()
StreamManager.ReleaseStream(ms)
return retval | Serialize self and get the byte stream.
Returns:
bytes: serialized object. |
20,759 | def parse_sentence(self, string):
result = []
def shift(string_):
string_ = string_.strip()
if not string_:
return '', ''
i = string_.find(' ')
if i == -1:
command_ = string_
string_ = ''
else:
command_ = string_[:i]
string_ = string_[i:]
return command_, string_
command, string = shift(string)
while command != '':
result += self.token(command) | Parses the given sentence. BASIC commands must be
typed UPPERCASE and as SEEN in ZX BASIC, e.g. GO SUB for gosub, etc... |
20,760 | def enterEvent( self, event ):
item = self.trackerItem()
if ( item ):
item.setVisible(True) | Toggles the display for the tracker item. |
20,761 | def _check_std(self, paths, cmd_pieces):
cmd_pieces.extend(paths)
process = Popen(cmd_pieces, stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
lines = out.strip().splitlines() + err.strip().splitlines()
result = []
for line in lines:
match = self.tool_err_re.match(line)
if not match:
if self.break_on_tool_re_mismatch:
raise ValueError(
% (
.join(cmd_pieces),
paths,
line))
continue
vals = match.groupdict()
vals[] = int(vals[])
vals[] = \
int(vals[]) if vals[] is not None else
result.append(vals)
return result | Run `cmd` as a check on `paths`. |
20,762 | def _get_pydot(self):
if self.pydot:
return self.pydot
self.pydot = __import__("pydot")
return self.pydot | Return pydot package. Load pydot, if necessary. |
20,763 | def add_response_headers(h):
def headers_wrapper(fun):
def wrapped_function(*args, **kwargs):
response = fun(*args, **kwargs)
for k, v in h.iteritems():
response[k] = v
return response
return wrapped_function
return headers_wrapper | Add HTTP-headers to response.
Example:
@add_response_headers({'Refresh': '10', 'X-Powered-By': 'Django'})
def view(request):
.... |
20,764 | def clear_jobs():
if not is_authorized():
return json.dumps({: }), 403, headers
days = flask.request.args.get('days', None)
return _clear_jobs(days) | Clear old jobs
:param days: Jobs for how many days should be kept (default: 10)
:type days: integer
:statuscode 200: no error
:statuscode 403: not authorized to delete jobs
:statuscode 409: an error occurred |
20,765 | def bootstrap_app():
from salt.netapi.rest_cherrypy import app
import salt.config
__opts__ = salt.config.client_config(
os.environ.get('SALT_MASTER_CONFIG', '/etc/salt/master'))
return app.get_app(__opts__) | Grab the opts dict of the master config by trying to import Salt |
20,766 | def show_text_glyphs(self, text, glyphs, clusters, cluster_flags=0):
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
clusters = ffi.new('cairo_text_cluster_t[]', clusters)
cairo.cairo_show_text_glyphs(
self._pointer, _encode_string(text), -1,
glyphs, len(glyphs), clusters, len(clusters), cluster_flags)
self._check_status() | This operation has rendering effects similar to :meth:`show_glyphs`
but, if the target surface supports it
(see :meth:`Surface.has_show_text_glyphs`),
uses the provided text and cluster mapping
to embed the text for the glyphs shown in the output.
If the target does not support the extended attributes,
this method acts like the basic :meth:`show_glyphs`
as if it had been passed :obj:`glyphs`.
The mapping between :obj:`text` and :obj:`glyphs`
is provided by an list of clusters.
Each cluster covers a number of UTF-8 text bytes and glyphs,
and neighboring clusters cover neighboring areas
of :obj:`text` and :obj:`glyphs`.
The clusters should collectively cover :obj:`text` and :obj:`glyphs`
in entirety.
:param text:
The text to show, as an Unicode or UTF-8 string.
Because of how :obj:`clusters` work,
using UTF-8 bytes might be more convenient.
:param glyphs:
A list of glyphs.
Each glyph is a ``(glyph_id, x, y)`` tuple.
:obj:`glyph_id` is an opaque integer.
Its exact interpretation depends on the font technology being used.
:obj:`x` and :obj:`y` are the float offsets
in the X and Y direction
between the origin used for drawing or measuring the string
and the origin of this glyph.
Note that the offsets are not cumulative.
When drawing or measuring text,
each glyph is individually positioned
with respect to the overall origin.
:param clusters:
A list of clusters.
A text cluster is a minimal mapping of some glyphs
corresponding to some UTF-8 text,
represented as a ``(num_bytes, num_glyphs)`` tuple of integers,
the number of UTF-8 bytes and glyphs covered by the cluster.
For a cluster to be valid,
both :obj:`num_bytes` and :obj:`num_glyphs` should be non-negative,
and at least one should be non-zero.
Note that clusters with zero glyphs
are not as well supported as normal clusters.
For example, PDF rendering applications
typically ignore those clusters when PDF text is being selected.
:type cluster_flags: int
:param cluster_flags:
Flags (as a bit field) for the cluster mapping.
The first cluster always covers bytes
from the beginning of :obj:`text`.
If :obj:`cluster_flags` does not have
the :obj:`TEXT_CLUSTER_FLAG_BACKWARD` flag set,
the first cluster also covers the beginning of :obj:`glyphs`,
otherwise it covers the end of the :obj:`glyphs` list
and following clusters move backward. |
20,767 | def orthogonal_basis(self):
if self.dim == 3:
x_arr = np.array([-self.data[1], self.data[0], 0])
if np.linalg.norm(x_arr) == 0:
x_arr = np.array([self.data[2], 0, 0])
x_arr = x_arr / np.linalg.norm(x_arr)
y_arr = np.cross(self.data, x_arr)
return Direction(x_arr, frame=self.frame), Direction(y_arr, frame=self.frame)
raise NotImplementedError() | Return an orthogonal basis to this direction.
Note
----
Only implemented in 3D.
Returns
-------
:obj:`tuple` of :obj:`Direction`
The pair of normalized Direction vectors that form a basis of
this direction's orthogonal complement.
Raises
------
NotImplementedError
If the vector is not 3D |
20,768 | def get_name_addr(value):
name_addr = NameAddr()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value | name-addr = [display-name] angle-addr |
20,769 | def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
if auth_list is None:
auth_list = []
if permissive is None:
permissive = self.opts.get('permissive_acl')
name_matched = False
for match in auth_provider:
if match == '*' and not permissive:
continue
if match.endswith('%'):
if match.rstrip('%') in groups:
auth_list.extend(auth_provider[match])
else:
if salt.utils.stringutils.expr_match(match, name):
name_matched = True
auth_list.extend(auth_provider[match])
if not permissive and not name_matched and '*' in auth_provider:
auth_list.extend(auth_provider['*'])
return auth_list | Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in. |
20,770 | def conformPadding(cls, chars):
pad = chars
if pad and pad[0] not in PAD_MAP:
pad = cls.getPaddingChars(cls.getPaddingNum(pad))
return pad | Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters |
20,771 | def port_create(request, network_id, **kwargs):
LOG.debug("port_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
{'network_id': network_id, 'kwargs': kwargs})
kwargs = unescape_port_kwargs(**kwargs)
body = {'port': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['port'].update(kwargs)
port = neutronclient(request).create_port(body=body).get('port')
return Port(port) | Create a port on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param device_id: (optional) device id attached to the port
:param tenant_id: (optional) tenant id of the port created
:param name: (optional) name of the port created
:returns: Port object |
20,772 | def snapshot(self):
return dict((category, dict((name, metric.value())
for name, metric in list(metrics.items())))
for category, metrics in
list(self._store.items())) | Return a nested dictionary snapshot of all metrics and their
values at this time. Example:
{
'category': {
'metric1_name': 42.0,
'metric2_name': 'foo'
}
} |
20,773 | def vn_info(call=None, kwargs=None):
if call != 'function':
raise SaltCloudSystemExit(
'The vn_info function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
vn_id = kwargs.get('vn_id', None)
if vn_id:
if name:
log.warning(
vn_id\name\
vn_id\
)
elif name:
vn_id = get_vn_id(kwargs={'name': name})
else:
raise SaltCloudSystemExit(
name\vn_id\
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.vn.info(auth, int(vn_id))
if response[0] is False:
return response[1]
else:
info = {}
tree = _get_xml(response[1])
info[tree.find().text] = _xml_to_dict(tree)
return info | Retrieves information for the virtual network.
.. versionadded:: 2016.3.0
name
The name of the virtual network for which to gather information. Can be
used instead of ``vn_id``.
vn_id
The ID of the virtual network for which to gather information. Can be
used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_info opennebula vn_id=3
salt-cloud --function vn_info opennebula name=public |
20,774 | def _render_headers(self):
headers = getattr(self, , ())
for index, col in enumerate(headers):
cell = self.worksheet.cell(row=1, column=index + 1)
cell.value = col[]
index += 1
extra_headers = getattr(self, , ())
for add_index, col in enumerate(extra_headers):
cell = self.worksheet.cell(row=1, column=add_index + index + 1)
cell.value = col[] | Write the headers row |
20,775 | async def close(self) -> None:
if not self.closed:
if self._connector is not None and self._connector_owner:
await self._connector.close()
self._connector = None | Close underlying connector.
Release all acquired resources. |
20,776 | def write(self, obj):
if self.verbose:
self._warnings("cache miss for {0}", self._cache_id_desc())
if self._start_time is not None:
elapsed = get_time() - self._start_time
else:
elapsed = None
out = self._write(self._cache_id_obj, elapsed, obj)
self._out = out
self.force_to_disk(self.get_size() > self._ram_quota)
self._last_access = get_time()
return self._read(out)[2] | Writes the given object to the cache file as pickle. The cache file with
its path is created if needed. |
20,777 | def get_clan_tracking(self, *tags: crtag, **params: keys):
url = self.api.CLAN + '/' + ','.join(tags) + '/tracking'
return self._get_model(url, **params) | Returns if the clan is currently being tracked
by the API by having either cr-api.com or royaleapi.com
in the clan description
Parameters
----------
\*tags: str
Valid clan tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout |
def timed_rotating_file_handler(name, logname, filename, when='h',
interval=1, backupCount=0,
encoding=None, delay=False, utc=False):
return wrap_log_handler(logging.handlers.TimedRotatingFileHandler(
filename, when=when, interval=interval, backupCount=backupCount,
encoding=encoding, delay=delay, utc=utc)) | A Bark logging handler logging output to a named file. At
intervals specified by the 'when', the file will be rotated, under
control of 'backupCount'.
Similar to logging.handlers.TimedRotatingFileHandler. |
20,779 | def designator(self):
version = str(self.version)
return '-'.join((version, self.error) if self.error else (version,)) | \
Returns the version and error correction level as string `V-E` where
`V` represents the version number and `E` the error level. |
20,780 | def dump_yaml(data, Dumper=_Dumper, default_flow_style=False):
content = yaml.dump(data,
default_flow_style=default_flow_style,
Dumper=Dumper)
return content.strip() | Returns data as yaml-formatted string. |
20,781 | def delete_refresh_token(self, refresh_token):
access_token = self.fetch_by_refresh_token(refresh_token)
self.delete(access_token.token) | Deletes a refresh token after use
:param refresh_token: The refresh token to delete. |
20,782 | def is_none_or(self):
if self._subject is None:
return NoOpInspector(subject=self._subject, error_factory=self._error_factory)
else:
return self | Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::
Ensure(None).is_none_or.is_an(int) |
20,783 | def parse_signature(signature):
if " -> " not in signature:
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip()
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires | Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types. |
20,784 | def listar_por_equipamento(self, id_equipment):
if not is_valid_int_param(id_equipment):
raise InvalidParameterError(
u)
url = + str(id_equipment) +
code, map = self.submit(None, 'GET', url)
key = 'script'
return get_list_map(self.response(code, map, [key]), key) | List all Script related Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
20,785 | def prev_img_ws(self, ws, loop=True):
channel = self.get_active_channel_ws(ws)
if channel is None:
return
channel.prev_image()
return True | Go to the previous image in the focused channel in the workspace. |
20,786 | def visualize_cloud_of_words(dictionary, image_path=None):
from PIL import Image
if image_path is not None:
mask = np.array(Image.open(image_path))
wc = WordCloud(mask=mask, background_color=, width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
else:
wc = WordCloud(background_color=, width=1600, height=1200, prefer_horizontal=0.8)
wc = wc.generate_from_frequencies(dictionary)
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (15, 15)
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show() | Renders the cloud of words representation for a given dictionary of frequencies
:param dictionary: the dictionary object that contains key-frequency pairs
:param image_path: the path to the image mask, None if no masking is needed |
20,787 | def cause_effect_info(self, mechanism, purview):
return min(self.cause_info(mechanism, purview),
self.effect_info(mechanism, purview)) | Return the cause-effect information for a mechanism over a purview.
This is the minimum of the cause and effect information. |
20,788 | def combs(a, r):
if r == 0:
return np.asarray([])
a = np.asarray(a)
data_type = a.dtype if r == 0 else np.dtype([('', a.dtype)] * r)
b = np.fromiter(combinations(a, r), data_type)
return b.view(a.dtype).reshape(-1, r) | NumPy implementation of ``itertools.combinations``.
Return successive ``r``-length combinations of elements in the array ``a``.
Args:
a (np.ndarray): The array from which to get combinations.
r (int): The length of the combinations.
Returns:
np.ndarray: An array of combinations. |
20,789 | def fit_linear(X, y):
model = linear_model.LinearRegression()
model.fit(X, y)
return model | Uses OLS to fit the regression. |
20,790 | def unpack_4to8(data):
tmpdata = data.astype(np.int16)
tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
updata = tmpdata.byteswap()
return updata.view(data.dtype) | Promote 4-bit unsigned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering |
20,791 | def evert(iterable: Iterable[Dict[str, Tuple]]) -> Iterable[Iterable[Dict[str, Any]]]:
keys = list(itertools.chain.from_iterable([ _.keys() for _ in iterable ]))
for values in itertools.product(*[ list(*_.values()) for _ in iterable ]):
yield [ dict(( pair, )) for pair in zip(keys, values) ] | Evert dictionaries with tuples.
Iterates over the list of dictionaries and everts them with their tuple
values. For example:
``[ { 'a': ( 1, 2, ), }, ]``
becomes
``[ ( { 'a': 1, }, ), ( { 'a', 2, }, ) ]``
The resulting iterable contains the same number of tuples as the
initial iterable had tuple elements. The number of dictionaries is the same
as the cartesian product of the initial iterable's tuple elements.
Parameters
----------
:``iterable``: list of dictionaries whose values are tuples
Return Value(s)
---------------
All combinations of the choices in the dictionaries. |
20,792 | def plot(self, entity):
df = self._binary_df[[entity]]
resampled = df.resample("s").ffill()
resampled.columns = ["value"]
fig, ax = plt.subplots(1, 1, figsize=(16, 2))
ax.fill_between(resampled.index, y1=0, y2=1, facecolor="royalblue", label="off")
ax.fill_between(
resampled.index,
y1=0,
y2=1,
where=(resampled["value"] > 0),
facecolor="red",
label="on",
)
ax.set_title(entity)
ax.set_xlabel("Date")
ax.set_frame_on(False)
ax.set_yticks([])
plt.legend(loc=(1.01, 0.7))
plt.show()
return | Basic plot of a single binary sensor data.
Parameters
----------
entity : string
The entity to plot |
20,793 | def execute_command(working_dir, cmd, env_dict):
proc_env = os.environ.copy()
proc_env["PATH"] = "{}:{}:.".format(proc_env["PATH"], working_dir)
proc_env.update(env_dict)
proc = subprocess.Popen(
cmd,
cwd=working_dir,
env=proc_env,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
status = proc.wait()
stdout, stderr = proc.communicate()
if status:
msg = (
"Non zero {} exit from command {}\n"
"Stdout: {}\n"
"Stderr: {}\n"
).format(status, cmd, stdout, stderr)
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info(stdout) | execute_command: run the command provided in the working dir
specified adding the env_dict settings to the
execution environment
:param working_dir: path to directory to execute command
also gets added to the PATH
:param cmd: Shell command to execute
:param env_dict: dictionary of additional env vars to
be passed to the subprocess environment |
20,794 | def add_proxy_to(self, parent, name, multiplicity=Multiplicity.ONE_MANY, **kwargs):
return self._client.create_proxy_model(self, parent, name, multiplicity, **kwargs) | Add this model as a proxy to another parent model.
This will add the current model as a proxy model to another parent model. It ensure that it will copy the
whole subassembly to the 'parent' model.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
additional keyword=value argument to this method. This will improve performance of the backend
against a trade-off that someone looking at the frontend won't notice any changes unless the page
is refreshed.
:param name: Name of the new proxy model
:type name: basestring
:param parent: parent of the to be proxied model
:type parent: :class:`Part`
:param multiplicity: the multiplicity of the new proxy model (default ONE_MANY)
:type multiplicity: basestring or None
:param kwargs: (optional) additional kwargs that will be passed in the during the edit/update request
:type kwargs: dict or None
:return: the new proxied :class:`Part`.
:raises APIError: in case an Error occurs
Examples
--------
>>> from pykechain.enums import Multiplicity
>>> bike_model = project.model('Bike')
# find the catalog model container, the highest parent to create catalog models under
>>> catalog_model_container = project.model('Catalog container')
>>> new_wheel_model = project.create_model(catalog_model_container, 'Wheel Catalog',
... multiplicity=Multiplicity.ZERO_MANY)
>>> new_wheel_model.add_proxy_to(bike_model, "Wheel", multiplicity=Multiplicity.ONE_MANY) |
20,795 | def p_arglist(p):
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[0].append(p[3]) | arglist : arg
| arglist COMMA arg |
20,796 | def _defaultdict(dct, fallback=_illegal_character):
out = defaultdict(lambda: fallback)
for k, v in six.iteritems(dct):
out[k] = v
return out | Wraps the given dictionary such that the given fallback function will be called when a nonexistent key is
accessed. |
20,797 | def merge(self, commit_message=, sha=None):
parameters = {'commit_message': commit_message}
if sha:
parameters['sha'] = sha
url = self._build_url('merge', base_url=self._api)
json = self._json(self._put(url, data=dumps(parameters)), 200)
self.merge_commit_sha = json['sha']
return json['merged'] | Merge this pull request.
:param str commit_message: (optional), message to be used for the
merge commit
:returns: bool |
20,798 | def parse_type(defn, preprocess=True):
if pycparser is None:
raise ImportError("Please install pycparser in order to parse C definitions")
defn = + defn.strip() +
if preprocess:
defn = do_preprocess(defn)
node = pycparser.c_parser.CParser().parse(make_preamble()[0] + defn)
if not isinstance(node, pycparser.c_ast.FileAST) or \
not isinstance(node.ext[-1], pycparser.c_ast.Typedef):
raise ValueError("Something went horribly wrong using pycparser")
decl = node.ext[-1].type
return _decl_to_type(decl) | Parse a simple type expression into a SimType
>>> parse_type('int *') |
20,799 | def classify_harmonic(self, partial_labels, use_CMN=True):
labels = np.array(partial_labels, copy=True)
unlabeled = labels == -1
fl, classes = _onehot(labels[~unlabeled])
L = self.laplacian(normed=False)
if ss.issparse(L):
L = L.tocsr()[unlabeled].toarray()
else:
L = L[unlabeled]
Lul = L[:,~unlabeled]
Luu = L[:,unlabeled]
fu = -np.linalg.solve(Luu, Lul.dot(fl))
if use_CMN:
scale = (1 + fl.sum(axis=0)) / fu.sum(axis=0)
fu *= scale
labels[unlabeled] = classes[fu.argmax(axis=1)]
return labels | Harmonic function method for semi-supervised classification,
also known as the Gaussian Mean Fields algorithm.
partial_labels: (n,) array of integer labels, -1 for unlabeled.
use_CMN : when True, apply Class Mass Normalization
From "Semi-Supervised Learning Using Gaussian Fields and Harmonic Functions"
by Zhu, Ghahramani, and Lafferty in 2003.
Based on the matlab code at:
http://pages.cs.wisc.edu/~jerryzhu/pub/harmonic_function.m |