Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
375,600 | def add_to_buffer(self, content, read_position):
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
self.read_buffer = content + self.read_buffer | Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to the working BufferWorkSpace buffer.
read_position (int): where in the file pointer the data was read from. |
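A minimal usage sketch (illustrative; the `BufferWorkSpace` container and its `read_buffer` field are assumed from the docstring, not shown in this row): chunks read at earlier file positions end up prepended to the buffer.
>>> ws = BufferWorkSpace()                        # hypothetical instance, read_buffer starts as None
>>> ws.add_to_buffer(b"world", read_position=6)
>>> ws.add_to_buffer(b"hello ", read_position=0)
>>> ws.read_buffer
b'hello world'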
375,601 | def serve_get(self, path, **params):
if path is None: return None
matched = self._match_path(path, self.get_registrations)
if matched is None:
return None
else:
return matched(path, **params) | Find a GET callback for the given HTTP path, call it and return the
results. The callback is called with two arguments, the path used to
match it, and params which include the BaseHTTPRequestHandler instance.
The callback must return a tuple:
(code, content, content_type)
If multiple registrations match the path, the one with the longest
matching text will be used. Matches are always anchored at the start
of the path.
None is returned if no registered callback is willing to handle a path. |
375,602 | def __stringify_predicate(predicate):
funname = getsource(predicate).strip().split()[2].rstrip(':')
params = ''
if '(' not in funname:  # assumption: the stripped string literals on these lines are best guesses
stack = getouterframes(currentframe())
for frame in range(0, len(stack)):
if funname in str(stack[frame]):
_, _, _, params = getargvalues(stack[frame][0])
return "function: {} params: {}".format(funname, params) | Reflection of function name and parameters of the predicate being used. |
375,603 | def newPage(doc, pno=-1, width=595, height=842):
doc._newPage(pno, width=width, height=height)
return doc[pno] | Create and return a new page object. |
375,604 | def all_casings(input_string):
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing | Permute all casings of a given string.
A pretty algorithm, via @Amber
http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python |
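A short doctest-style illustration of the generator (characters without distinct cases pass through unchanged):
>>> sorted(all_casings("a1b"))
['A1B', 'A1b', 'a1B', 'a1b']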
375,605 | def fitness(self, parsimony_coefficient=None):
if parsimony_coefficient is None:
parsimony_coefficient = self.parsimony_coefficient
penalty = parsimony_coefficient * len(self.program) * self.metric.sign
return self.raw_fitness_ - penalty | Evaluate the penalized fitness of the program according to X, y.
Parameters
----------
parsimony_coefficient : float, optional
If automatic parsimony is being used, the computed value according
to the population. Otherwise the initialized value is used.
Returns
-------
fitness : float
The penalized fitness of the program. |
375,606 | def _request(self, buf, properties, date=None):
self.ensure_alive()
try:
input_format = properties.get("inputFormat", "text")
if input_format == "text":
ctype = "text/plain; charset=utf-8"
elif input_format == "serialized":
ctype = "application/x-protobuf"
else:
raise ValueError("Unrecognized inputFormat " + input_format)
if date:
params = {'properties': str(properties), 'date': str(date)}
else:
params = {'properties': str(properties)}
r = requests.post(self.endpoint,
params=params,
data=buf, headers={'content-type': ctype},
timeout=(self.timeout*2)/1000)
r.raise_for_status()
return r
except requests.HTTPError as e:
if r.text == "CoreNLP request timed out. Your document may be too long.":
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text) | Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:param (str) date: reference date of document, used by server to set docDate - expects YYYY-MM-DD
:return: request result |
375,607 | def disable(cls, args):
mgr = NAppsManager()
if args['all']:
napps = mgr.get_enabled()
else:
napps = args['<napp>']
for napp in napps:
mgr.set_napp(*napp)
LOG.info('NApp %s:', mgr.napp_id)
cls.disable_napp(mgr) | Disable subcommand. |
375,608 | def _config_profile_list(self):
url = self._cfg_profile_list_url
payload = {}
try:
res = self._send_request('GET', url, payload, 'config-profile')  # assumption: stripped arguments reconstructed
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.") | Get list of supported config profile from DCNM. |
375,609 | def sample(self, size=1):
samples = scipy.stats.bernoulli.rvs(self.p, size=size)
if size == 1:
return samples[0]
return samples | Generate samples of the random variable.
Parameters
----------
size : int
The number of samples to generate.
Returns
-------
:obj:`numpy.ndarray` of int or int
The samples of the random variable. If `size == 1`, then
the returned value will not be wrapped in an array. |
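The method wraps `scipy.stats.bernoulli.rvs`; a rough sketch of the two return shapes, assuming an instance `rv` with `rv.p = 0.5` (draws are random, so the values shown are only illustrative):
>>> rv.sample(4)   # ndarray of 4 draws, e.g. array([0, 1, 1, 0])
>>> rv.sample()    # size defaults to 1, so a bare int is returned, e.g. 1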
375,610 | def GammaContrast(gamma=1, per_channel=False, name=None, deterministic=False, random_state=None):
params1d = [iap.handle_continuous_param(gamma, "gamma", value_range=None, tuple_to_uniform=True,
list_to_choice=True)]
func = adjust_contrast_gamma
return _ContrastFuncWrapper(
func, params1d, per_channel,
dtypes_allowed=["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"],
dtypes_disallowed=["float96", "float128", "float256", "bool"],
name=name if name is not None else ia.caller_name(),
deterministic=deterministic,
random_state=random_state
) | Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
Values in the range ``gamma=(0.5, 2.0)`` seem to be sensible.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.
Parameters
----------
gamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Exponent for the contrast adjustment. Higher values darken the image.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform gamma contrast adjustment. |
375,611 | def set_value(self, value, timeout):
self.value = value
self.expiration = time.perf_counter() * 1000 + timeout | Sets a new value and extends its expiration.
:param value: a new cached value.
:param timeout: a expiration timeout in milliseconds. |
375,612 | def l2_regularizer(decay, name_filter='weights'):
return regularizer(
'l2_regularizer',
lambda x: tf.nn.l2_loss(x) * decay,
name_filter=name_filter) | Create an l2 regularizer. |
375,613 | def gen_accept(id_, keysize=2048, force=False):
id_ = clean.id(id_)
ret = gen(id_, keysize)
acc_path = os.path.join(__opts__['pki_dir'], 'minions', id_)
if os.path.isfile(acc_path) and not force:
return {}
with salt.utils.files.fopen(acc_path, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(ret['pub']))
return ret |
Generate a key pair then accept the public key. This function returns the
key pair in a dict, only the public key is preserved on the master. Returns
a dictionary.
id\_
The name of the minion for which to generate a key pair.
keysize
The size of the key pair to generate. The size must be ``2048``, which
is the default, or greater. If set to a value less than ``2048``, the
key size will be rounded up to ``2048``.
force
If a public key has already been accepted for the given minion on the
master, then the gen_accept function will return an empty dictionary
and not create a new key. This is the default behavior. If ``force``
is set to ``True``, then the minion's previously accepted key will be
overwritten.
.. code-block:: python
>>> wheel.cmd('key.gen_accept', ['foo'])
{'pub': '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC
...
BBPfamX9gGPQTpN9e8HwcZjXQnmg8OrcUl10WHw09SDWLOlnW+ueTWugEQpPt\niQIDAQAB\n
-----END PUBLIC KEY-----',
'priv': '-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA42Kf+w9XeZWgguzv
...
QH3/W74X1+WTBlx4R2KGLYBiH+bCCFEQ/Zvcu4Xp4bIOPtRKozEQ==\n
-----END RSA PRIVATE KEY-----'}
We can now see that the ``foo`` minion's key has been accepted by the master:
.. code-block:: python
>>> wheel.cmd('key.list', ['accepted'])
{'minions': ['foo', 'minion1', 'minion2', 'minion3']} |
375,614 | def parse_valu(text, off=0):
_, off = nom(text, off, whites)
if nextchar(text, off, '('):
return parse_list(text, off)
if isquote(text, off):
return parse_string(text, off)
valu, off = nom(text, off, varset)  # assumption: the token-reading line was lost in extraction; the set name is a guess
try:
valu = int(valu, 0)
except ValueError:
pass
return valu, off | Special syntax for the right side of equals in a macro |
375,615 | def import_locations(self, data):
self._data = data
field_names = ('geonameid', 'name', 'asciiname', 'alt_names',
'latitude', 'longitude', 'feature_class',
'feature_code', 'country', 'alt_country', 'admin1',
'admin2', 'admin3', 'admin4', 'population', 'elevation',
'gtopo30', 'tzname', 'modified_date')
comma_split = lambda s: s.split(',')
date_parse = lambda s: datetime.date(*map(int, s.split('-')))
or_none = lambda x, s: x(s) if s else None
str_or_none = lambda s: or_none(str, s)
float_or_none = lambda s: or_none(float, s)
int_or_none = lambda s: or_none(int, s)
tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
field_parsers = (int_or_none, str_or_none, str_or_none, comma_split,
float_or_none, float_or_none, str_or_none,
str_or_none, str_or_none, comma_split, str_or_none,
str_or_none, str_or_none, str_or_none, int_or_none,
int_or_none, int_or_none, tz_parse, date_parse)
data = utils.prepare_csv_read(data, field_names, delimiter="\t")
for row in data:
try:
for name, parser in zip(field_names, field_parsers):
row[name] = parser(row[name])
except ValueError:
raise utils.FileFormatError()
self.append(Location(**row)) | Parse geonames.org country database exports.
``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
objects generated from the data exported by geonames.org_.
It expects data files in the following tab separated format::
2633441 Afon Wyre Afon Wyre River Wayrai,River Wyrai,Wyre 52.3166667 -4.1666667 H STM GB GB 00 0 -9999 Europe/London 1994-01-13
2633442 Wyre Wyre Viera 59.1166667 -2.9666667 T ISL GB GB V9 0 1 Europe/London 2004-09-24
2633443 Wraysbury Wraysbury Wyrardisbury 51.45 -0.55 P PPL GB P9 0 28 Europe/London 2006-08-21
Files containing the data in this format can be downloaded from the
geonames.org_ site in their `database export page`_.
Files downloaded from the geonames site when processed by
``import_locations()`` will return ``list`` objects of the following
style::
[Location(2633441, "Afon Wyre", "Afon Wyre",
['River Wayrai', 'River Wyrai', 'Wyre'],
52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
None, None, None, 0, None, -9999, "Europe/London",
datetime.date(1994, 1, 13)),
Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
-2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
None, 0, None, 1, "Europe/London",
datetime.date(2004, 9, 24)),
Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
None, 0, None, 28, "Europe/London",
datetime.date(2006, 8, 21))]
Args:
data (iter): geonames.org locations data to read
Returns:
list: geonames.org identifiers with :class:`Location` objects
Raises:
FileFormatError: Unknown file format
.. _geonames.org: http://www.geonames.org/
.. _database export page: http://download.geonames.org/export/dump/ |
375,616 | def portdate(port_number, date=None, return_format=None):
uri = .format(number=port_number)
if date:
try:
uri = .join([uri, date.strftime("%Y-%m-%d")])
except AttributeError:
uri = .join([uri, date])
response = _get(uri, return_format)
if in str(response):
raise Error(.format(number=port_number))
else:
return response | Information about a particular port at a particular date.
If the date is omitted, today's date is used.
:param port_number: a string or integer port number
:param date: an optional string in 'Y-M-D' format or datetime.date() object |
375,617 | def _GetTSKPartitionIdentifiers(self, scan_node):
if not scan_node or not scan_node.path_spec:
raise errors.ScannerError()
volume_system = tsk_volume_system.TSKVolumeSystem()
volume_system.Open(scan_node.path_spec)
volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
volume_system)
if not volume_identifiers:
return []
if len(volume_identifiers) == 1:
return volume_identifiers
if not self._mediator:
raise errors.ScannerError(
)
try:
volume_identifiers = self._mediator.GetPartitionIdentifiers(
volume_system, volume_identifiers)
except KeyboardInterrupt:
raise errors.UserAbort()
return self._NormalizedVolumeIdentifiers(
volume_system, volume_identifiers, prefix='p') | Determines the TSK partition identifiers.
Args:
scan_node (SourceScanNode): scan node.
Returns:
list[str]: TSK partition identifiers.
Raises:
ScannerError: if the format of or within the source is not supported or
the scan node is invalid or if the volume for a specific identifier
cannot be retrieved.
UserAbort: if the user requested to abort. |
375,618 | def vofile(filename, **kwargs):
basename = os.path.basename(filename)
if os.access(basename, os.R_OK):
return open(basename, 'r')
kwargs['view'] = kwargs.get('view', 'data')  # assumption: stripped key and default reconstructed
return client.open(filename, **kwargs) | Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return: |
375,619 | def hybrid_forward(self, F, inputs, token_types, valid_length=None, masked_positions=None):
outputs = []
seq_out, attention_out = self._encode_sequence(F, inputs, token_types, valid_length)
outputs.append(seq_out)
if self.encoder._output_all_encodings:
assert isinstance(seq_out, list)
output = seq_out[-1]
else:
output = seq_out
if attention_out:
outputs.append(attention_out)
if self._use_pooler:
pooled_out = self._apply_pooling(output)
outputs.append(pooled_out)
if self._use_classifier:
next_sentence_classifier_out = self.classifier(pooled_out)
outputs.append(next_sentence_classifier_out)
if self._use_decoder:
assert masked_positions is not None, 'masked_positions tensor is required for decoding masked language model'
decoder_out = self._decode(output, masked_positions)
outputs.append(decoder_out)
return tuple(outputs) if len(outputs) > 1 else outputs[0] | Generate the representation given the inputs.
This is used in training or fine-tuning a static (hybridized) BERT model. |
375,620 | def find_first_fit(unoccupied_columns, row, row_length):
for free_col in unoccupied_columns:
first_item_x = row[0][0]
offset = free_col - first_item_x
if check_columns_fit(unoccupied_columns, row, offset, row_length):
return offset
raise ValueError("Row cannot bossily fit in %r: %r"
% (list(unoccupied_columns.keys()), row)) | Finds the first index that the row's items can fit. |
375,621 | def _to_numeric_float(number, nums_int):
index_end = len(number) - nums_int
return float(number[:nums_int] + '.' + number[-index_end:]) | Transforms a string into a float.
The nums_int parameter indicates the number of characters, starting from
the left, to be used for the integer value. All the remaining ones will be
used for the decimal value.
:param number: string with the number
:param nums_int: characters, counting from the left, for the integer value
:return: a float created from the string |
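A quick doctest-style check of the digit split (the first `nums_int` characters form the integer part, the remainder the decimal part):
>>> _to_numeric_float("12345", 3)
123.45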
375,622 | def unmarshal_json(
obj,
cls,
allow_extra_keys=True,
ctor=None,
):
return unmarshal_dict(
obj,
cls,
allow_extra_keys,
ctor=ctor,
) | Unmarshal @obj into @cls
Args:
obj: dict, A JSON object
cls: type, The class to unmarshal into
allow_extra_keys: bool, False to raise an exception when extra
keys are present, True to ignore
ctor: None-or-static-method: Use this method as the
constructor instead of __init__
Returns:
instance of @cls
Raises:
ExtraKeysError: If allow_extra_keys == False, and extra keys
are present in @obj and not in @cls.__init__
ValueError: If @cls.__init__ does not contain a self argument |
375,623 | def tostring(self, fully_qualified=True, pretty_print=True, encoding="UTF-8"):
root = self.serialize(fully_qualified=fully_qualified)
kwargs = {"pretty_print": pretty_print, "encoding": encoding}
if encoding != "unicode":
kwargs["xml_declaration"] = True
return etree.tostring(root, **kwargs) | Serialize and return a string of this METS document.
To write to file, see :meth:`write`.
The default encoding is ``UTF-8``. This method will return a unicode
string when ``encoding`` is set to ``unicode``.
:return: String of this document |
375,624 | def sigma_filter(filename, region, step_size, box_size, shape, domask, sid):
ymin, ymax = region
logging.debug(.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
data_row_min = max(0, ymin - box_size[0]//2)
data_row_max = min(shape[0], ymax + box_size[0]//2)
NAXIS = fits.getheader(filename)["NAXIS"]
with fits.open(filename, memmap=True) as a:
if NAXIS == 2:
data = a[0].section[data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 3:
data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 4:
data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]]
else:
logging.error("Too many NAXIS for me {0}".format(NAXIS))
logging.error("fix your file to be more sane")
raise Exception("Too many NAXIS")
row_len = shape[1]
logging.debug(.format(data.shape))
def box(r, c):
r_min = max(0, r - box_size[0] // 2)
r_max = min(data.shape[0] - 1, r + box_size[0] // 2)
c_min = max(0, c - box_size[1] // 2)
c_max = min(data.shape[1] - 1, c + box_size[1] // 2)
return r_min, r_max, c_min, c_max
rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0]))
rows.append(ymax-data_row_min)
cols = list(range(0, shape[1], step_size[1]))
cols.append(shape[1])
vals = np.zeros(shape=(len(rows),len(cols)))
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
bkg, _ = sigmaclip(new, 3, 3)
vals[i,j] = bkg
gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]]
logging.debug("Interpolating bkg to sharemem")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
ibkg[start_idx:end_idx] = row
del ifunc
logging.debug(" ... done writing bkg")
barrier(bkg_events, sid)
logging.debug("{0} background subtraction".format(sid))
for i in range(data_row_max - data_row_min):
start_idx = np.ravel_multi_index((data_row_min + i, 0), shape)
end_idx = start_idx + row_len
data[i, :] = data[i, :] - ibkg[start_idx:end_idx]
vals[:] = 0
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
_ , rms = sigmaclip(new, 3, 3)
vals[i,j] = rms
logging.debug("Interpolating rm to sharemem rms")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
irms[start_idx:end_idx] = row
del ifunc
logging.debug(" .. done writing rms")
if domask:
barrier(mask_events, sid)
logging.debug("applying mask")
for i in range(gr.shape[0]):
mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0]
for j in mask:
idx = np.ravel_multi_index((i + ymin,j),shape)
ibkg[idx] = np.nan
irms[idx] = np.nan
logging.debug(" ... done applying mask")
logging.debug(.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
return | Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None |
375,625 | def get_clipboard_text_and_convert(paste_list=False):
txt = GetClipboardText()
if txt:
if paste_list and u"\t" in txt:
array, flag = make_list_of_list(txt)
if flag:
txt = repr(array)
else:
txt = u"array(%s)"%repr(array)
txt = u"".join([c for c in txt if c not in u" \t\r\n"])
return txt | u"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric |
375,626 | def image_id_from_registry(image_name):
registry, repository, tag = parse(image_name)
try:
token = auth_token(registry, repository).get("token")
if registry == "index.docker.io":
registry = "registry-1.docker.io"
res = requests.head("https://{}/v2/{}/manifests/{}".format(registry, repository, tag), headers={
"Authorization": "Bearer {}".format(token),
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
}, timeout=5)
res.raise_for_status()
except requests.RequestException:
log.error("Received {} when attempting to get digest for {}".format(
res, image_name))
return None
return "@".join([registry+"/"+repository, res.headers["Docker-Content-Digest"]]) | Get the docker id from a public or private registry |
375,627 | def acceptRecord(self, item):
record = item.record()
self.treePopupWidget().close()
self.setCurrentRecord(record) | Closes the tree popup and sets the current record.
:param record | <orb.Table> |
375,628 | def _publish_message(host, amqp_settings, routing_key, data):
if host == "stdout":
print("Published to %s: %s" % (routing_key, data))
return True
try:
conn = Connection(**remove_nones(
host=host,
userid=amqp_settings.get("userid"),
password=amqp_settings.get("password"),
connect_timeout=amqp_settings.get("connect_timeout")
))
except socket.error as e:
print_error("Cannot connect to the message broker: %s" % (e))
return False
channel = conn.channel()
msg = basic_message.Message(**remove_nones(
body=json.dumps(data),
delivery_mode=amqp_settings.get("message_delivery_mode"),
content_type="application/json",
content_encoding="utf-8"
))
try:
channel.basic_publish(
msg,
amqp_settings["exchange_name"],
routing_key
)
except Exception as e:
print_error("Failed to publish message: %s" % (e))
return False
return True | Publish an AMQP message.
Returns:
bool: True if message was sent successfully. |
375,629 | def _add_generic(self, start_node, type_name, group_type_name, args, kwargs,
add_prefix=True, check_naming=True):
args = list(args)
create_new = True
name = ''
instance = None
constructor = None
add_link = type_name == LINK
if add_link:
name = args[0]
instance = args[1]
create_new = False
elif len(args) == 1 and len(kwargs) == 0:
item = args[0]
try:
name = item.v_full_name
instance = item
create_new = False
except AttributeError:
pass
if create_new:
if len(args) > 0 and inspect.isclass(args[0]):
constructor = args.pop(0)
if len(args) > 0 and isinstance(args[0], str):
name = args.pop(0)
elif 'name' in kwargs:
name = kwargs.pop('name')
elif 'full_name' in kwargs:
name = kwargs.pop('full_name')
else:
raise ValueError(
)
split_names = name.split('.')
if check_naming:
for idx, name in enumerate(split_names):
translated_shortcut, name = self._translate_shortcut(name)
replaced, name = self._replace_wildcards(name)
if translated_shortcut or replaced:
split_names[idx] = name
faulty_names = self._check_names(split_names, start_node)
if faulty_names:
full_name = '.'.join(split_names)
raise ValueError(
% (full_name, faulty_names))
if add_link:
if instance is None:
raise ValueError()
if instance.v_is_root:
raise ValueError()
if start_node.v_is_root and name in SUBTREE_MAPPING:
raise ValueError( % name)
if not self._root_instance.f_contains(instance, with_links=False, shortcuts=False):
raise ValueError()
if add_prefix:
split_names = self._add_prefix(split_names, start_node, group_type_name)
if group_type_name == GROUP:
add_leaf = type_name != group_type_name and not add_link
group_type_name, type_name = self._determine_types(start_node, split_names[0],
add_leaf, add_link)
if self._root_instance._is_run and type_name in SENSITIVE_TYPES:
raise TypeError(
)
return self._add_to_tree(start_node, split_names, type_name, group_type_name, instance,
constructor, args, kwargs) | Adds a given item to the tree irrespective of the subtree.
Infers the subtree from the arguments.
:param start_node: The parental node the adding was initiated from
:param type_name:
The type of the new instance. Whether it is a parameter, parameter group, config,
config group, etc. See the name of the corresponding constants at the top of this
python module.
:param group_type_name:
Type of the subbranch. i.e. whether the item is added to the 'parameters',
'results' etc. These subbranch types are named as the group names
(e.g. 'PARAMETER_GROUP') in order to have less constants.
For all constants used see beginning of this python module.
:param args:
Arguments specifying how the item is added.
If len(args)==1 and the argument is the a given instance of a result or parameter,
this one is added to the tree.
Otherwise it is checked if the first argument is a class specifying how to
construct a new item and the second argument is the name of the new class.
If the first argument is not a class but a string, the string is assumed to be
the name of the new instance.
Additional args are later on used for the construction of the instance.
:param kwargs:
Additional keyword arguments that might be handed over to the instance constructor.
:param add_prefix:
If a prefix group, i.e. `results`, `config`, etc. should be added
:param check_naming:
If it should be checked for correct namings, can be set to ``False`` if data is loaded
and we know that all names are correct.
:return: The new added instance |
375,630 | def rotate_v1(array, k):
array = array[:]
n = len(array)
for i in range(k):
temp = array[n - 1]
for j in range(n-1, 0, -1):
array[j] = array[j - 1]
array[0] = temp
return array | Rotate the entire array 'k' times
T(n)- O(nk)
:type array: List[int]
:type k: int
:rtype: void Do not return anything, modify array in-place instead. |
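Note that, despite the in-place wording above, this version copies the input and returns the rotated list; an illustrative call:
>>> rotate_v1([1, 2, 3, 4, 5, 6, 7], 3)
[5, 6, 7, 1, 2, 3, 4]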
375,631 | def update(self):
ret = True
fields = self.depopulate(True)
q = self.query
q.set_fields(fields)
pk = self.pk
if pk:
q.is_field(self.schema.pk.name, pk)
else:
raise ValueError("You cannot update without a primary key")
if q.update():
fields = q.fields
self._populate(fields)
else:
ret = False
return ret | re-persist the updated field values of this orm that has a primary key |
375,632 | def generate_monthly(rain_day_threshold, day_end_hour, use_dst,
daily_data, monthly_data, process_from):
start = monthly_data.before(datetime.max)
if start is None:
start = datetime.min
start = daily_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
start = timezone.local_replace(
start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0)
if day_end_hour >= 12:
start -= DAY
del monthly_data[start:]
stop = daily_data.before(datetime.max)
if stop is None:
return None
acc = MonthAcc(rain_day_threshold)
def monthlygen(inputdata):
month_start = start
count = 0
while month_start <= stop:
count += 1
if count % 12 == 0:
logger.info("monthly: %s", month_start.isoformat())
else:
logger.debug("monthly: %s", month_start.isoformat())
month_end = month_start + WEEK
if month_end.month < 12:
month_end = month_end.replace(month=month_end.month+1)
else:
month_end = month_end.replace(month=1, year=month_end.year+1)
month_end = month_end - WEEK
if use_dst:
month_end = timezone.local_replace(
month_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
acc.reset()
for data in inputdata[month_start:month_end]:
acc.add_daily(data)
new_data = acc.result()
if new_data:
new_data['idx'] = month_start
yield new_data
month_start = month_end
monthly_data.update(monthlygen(daily_data))
return start | Generate monthly summaries from daily data. |
375,633 | def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
lineno = lineno - 1
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
source = None
if loader is not None and hasattr(loader, "get_source"):
result = get_source_lines_from_loader(loader, module_name, lineno, lower_bound, upper_bound)
if result is not None:
return result
if source is None:
try:
with open(filename, "rb") as file_obj:
encoding = "utf8"
for line in itertools.islice(file_obj, 0, 2):
match = _coding_re.search(line.decode("utf8"))
if match:
encoding = match.group(1)
break
file_obj.seek(0)
lines = [
compat.text_type(line, encoding, "replace")
for line in itertools.islice(file_obj, lower_bound, upper_bound + 1)
]
offset = lineno - lower_bound
return (
[l.strip("\r\n") for l in lines[0:offset]],
lines[offset].strip("\r\n"),
[l.strip("\r\n") for l in lines[offset + 1 :]] if len(lines) > offset else [],
)
except (OSError, IOError, IndexError):
pass
return None, None, None | Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context). |
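A minimal usage sketch (hypothetical file name; the `loader` and `module_name` arguments are optional):
>>> pre, line, post = get_lines_from_file("app/views.py", 42, 3)
>>> line          # the source line at lineno 42, with the trailing newline stripped
>>> pre, post     # up to 3 lines of context before and after it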
375,634 | def store(self, database, validate=True, role=None):
if validate:
self.validate()
self._id, self._rev = database.save(self.to_primitive(role=role))
return self | Store the document in the given database.
:param database: the `Database` object source for storing the document.
:return: an updated instance of `Document` / self. |
375,635 | def batch_run(self, *commands):
original_retries = self.repeat_commands
self.repeat_commands = 1
for _ in range(original_retries):
for command in commands:
cmd = command[0]
args = command[1:]
cmd(*args)
self.repeat_commands = original_retries | Run batch of commands in sequence.
Input is positional arguments with (function pointer, *args) tuples.
This method is useful for executing commands to multiple groups with retries,
without having too long delays. For example,
- Set group 1 to red and brightness to 10%
- Set group 2 to red and brightness to 10%
- Set group 3 to white and brightness to 100%
- Turn off group 4
With three repeats, running these consecutively takes approximately 100ms * 13 commands * 3 times = 3.9 seconds.
With batch_run, execution takes same time, but first loop - each command is sent once to every group -
is finished within 1.3 seconds. After that, each command is repeated two times. Most of the time, this ensures
slightly faster changes for each group.
Usage:
led.batch_run((led.set_color, "red", 1), (led.set_brightness, 10, 1), (led.set_color, "white", 3), ...) |
375,636 | def _get_schema_loader(self, strict=False):
return functools.partial(schema.load_schema, version=self.version,
strict=strict) | Gets a closure for schema.load_schema with the correct/current
Opsview version |
375,637 | def readin_rho(filename, rhofile=True, aniso=False):
if aniso:
a = [[0, 1, 2], [2, 3, 4]]
else:
a = [0, 2]
if rhofile:
if filename is None:
filename =
with open(filename, ) as fid:
mag = np.loadtxt(fid, skiprows=1, usecols=(a[0]))
else:
if filename is None:
filename = read_iter()
with open(filename, ) as fid:
mag = np.power(10, np.loadtxt(fid, skiprows=1, usecols=(a[1])))
return mag | Read in the values of the resistivity in Ohmm.
The format is variable: rho-file or mag-file. |
375,638 | def sighash(self, sighash_type, index=0, joinsplit=False, script_code=None,
anyone_can_pay=False, prevout_value=None):
if joinsplit and anyone_can_pay:
raise ValueError("ANYONECANPAY can't be used with joinsplits") | ZIP243
https://github.com/zcash/zips/blob/master/zip-0243.rst |
375,639 | def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
stats = _calculate_comparison_stats(truth_vcf)
call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data),
stats, work_dir, data)
truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
"%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data) | Validate a caller VCF against truth within callable regions using SURVIVOR.
Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/) |
375,640 | def _ValidateDataTypeDefinition(cls, data_type_definition):
if not cls._IsIdentifier(data_type_definition.name):
raise ValueError(
.format(
data_type_definition.name))
if keyword.iskeyword(data_type_definition.name):
raise ValueError(
.format(
data_type_definition.name))
members = getattr(data_type_definition, , None)
if not members:
raise ValueError(
.format(
data_type_definition.name))
defined_attribute_names = set()
for member_definition in members:
attribute_name = member_definition.name
if not cls._IsIdentifier(attribute_name):
raise ValueError(.format(
attribute_name))
if attribute_name.startswith():
raise ValueError(.format(
attribute_name))
if keyword.iskeyword(attribute_name):
raise ValueError(.format(
attribute_name))
if attribute_name in defined_attribute_names:
raise ValueError(.format(
attribute_name))
defined_attribute_names.add(attribute_name) | Validates the data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid. |
375,641 | def decode_bbox_target(box_predictions, anchors):
orig_shape = tf.shape(anchors)
box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1)
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
clip = np.log(config.PREPROC.MAX_SIZE / 16.)
wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
xbyb = box_pred_txty * waha + xaya
x1y1 = xbyb - wbhb * 0.5
x2y2 = xbyb + wbhb * 0.5
out = tf.concat([x1y1, x2y2], axis=-2)
return tf.reshape(out, orig_shape) | Args:
box_predictions: (..., 4), logits
anchors: (..., 4), floatbox. Must have the same shape
Returns:
box_decoded: (..., 4), float32. With the same shape. |
375,642 | async def serve(
app: ASGIFramework,
config: Config,
*,
task_status: trio._core._run._TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
if config.debug:
warnings.warn("The config `debug` has no affect when using serve", Warning)
if config.workers != 1:
warnings.warn("The config `workers` has no affect when using serve", Warning)
if config.worker_class != "asyncio":
warnings.warn("The config `worker_class` has no affect when using serve", Warning)
await worker_serve(app, config, task_status=task_status) | Serve an ASGI framework app given the config.
This allows for a programmatic way to serve an ASGI framework, it
can be used via,
.. code-block:: python
trio.run(partial(serve, app, config))
It is assumed that the event-loop is configured before calling
this function, therefore configuration values that relate to loop
setup or process setup are ignored. |
375,643 | def gene_tree(
self,
scale_to=None,
population_size=1,
trim_names=True,
):
tree = self.template or self.yule()
for leaf in tree._tree.leaf_node_iter():
leaf.num_genes = 1
dfr = tree._tree.seed_node.distance_from_root()
dft = tree._tree.seed_node.distance_from_tip()
tree_height = dfr + dft
if scale_to:
population_size = tree_height / scale_to
for edge in tree._tree.preorder_edge_iter():
edge.pop_size = population_size
gene_tree = dpy.simulate.treesim.constrained_kingman_tree(tree._tree)[0]
if trim_names:
for leaf in gene_tree.leaf_node_iter():
leaf.taxon.label = leaf.taxon.label.split('_')[0]  # assumption: this line and the return below are reconstructed from garbled fragments
return {'gene_tree': gene_tree.as_string(schema='newick').strip(';\n') + ';', 'species_tree': tree} | Using the current tree object as a species tree, generate a gene
tree using the constrained Kingman coalescent process from dendropy. The
species tree should probably be a valid, ultrametric tree, generated by
some pure birth, birth-death or coalescent process, but no checks are
made. Optional kwargs are: -- scale_to, which is a floating point value
to scale the total tree tip-to-root length to, -- population_size, which
is a floating point value which all branch lengths will be divided by to
convert them to coalescent units, and -- trim_names, boolean, defaults
to true, trims off the number which dendropy appends to the sequence
name |
375,644 | def get_plugin(self, name):
for p in self._plugins:
if p.name == name:
return p
return None | Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return: |
375,645 | def parse_def(self, text):
self.__init__()
if not is_start_of_function(text):
return
self.func_indent = get_indent(text)
text = text.strip()
text = text.replace('\r\n', '')
text = text.replace('\n', '')
return_type_re = re.search(r'->[ ]*([\w\[\], \.]*):$', text)  # assumption: regex literal reconstructed
if return_type_re:
self.return_type_annotated = return_type_re.group(1)
text_end = text.rfind(return_type_re.group(0))
else:
self.return_type_annotated = None
text_end = len(text)
pos_args_start = text.find('(') + 1
pos_args_end = text.rfind(')', pos_args_start, text_end)
self.args_text = text[pos_args_start:pos_args_end]
args_list = self.split_args_text_to_list(self.args_text)
if args_list is not None:
self.has_info = True
self.split_arg_to_name_type_value(args_list) | Parse the function definition text. |
375,646 | def draw(self, viewer):
cache = self.get_cache(viewer)
if not cache.drawn:
cache.drawn = True
viewer.redraw(whence=2)
cpoints = self.get_cpoints(viewer)
cr = viewer.renderer.setup_cr(self)
if self.linewidth > 0:
cr.draw_polygon(cpoints)
if self.showcap:
self.draw_caps(cr, self.cap, cpoints) | General draw method for RGB image types.
Note that actual insertion of the image into the output is
handled in `draw_image()` |
375,647 | def make_geohash_tables(table,listofprecisions,**kwargs):
return_squares = False
sort_by =
for key,value in kwargs.iteritems():
if key == :
sort_by = value
if key == :
return_squares = value
header = df2list(table)[0]
columns = header[10:]
originaltable = table
if not sort_by == :
originaltable = originaltable.sort([sort_by],ascending=[0])
listofprecisions = sorted(listofprecisions,reverse=True)
if return_squares == True and listofprecisions[-1] == 8:
total_list = [table]
elif return_squares == True:
total_list = []
for row in listofprecisions:
precision = int(row)
table = originaltable
table[] = table.GEOHASH.str[:precision]
table = table[[,]+columns].groupby([],sort=True).sum()
table = table.sort([sort_by],ascending=[0])
table = table.reset_index()
newsquares = [header]
for row in df2list(table)[1:]:
points = get_points_geohash(row[0])
newrow = [row[0]] + points + row[1:]
newsquares.append(newrow)
table = list2df(newsquares)
if return_squares == True:
total_list.append(table)
else:
table.to_csv( + str(precision) + ,index=False)
if return_squares == True:
return total_list
else:
print | sort_by - field to sort by for each group
return_squares - boolean arg if true returns a list of squares instead of writing out to table |
375,648 | def ckf_transform(Xs, Q):
m, n = Xs.shape
x = sum(Xs, 0)[:, None] / m
P = np.zeros((n, n))
xf = x.flatten()
for k in range(m):
P += np.outer(Xs[k], Xs[k]) - np.outer(xf, xf)
P *= 1 / m
P += Q
return x, P | Compute mean and covariance of array of cubature points.
Parameters
----------
Xs : ndarray
Cubature points
Q : ndarray
Noise covariance
Returns
-------
mean : ndarray
mean of the cubature points
variance: ndarray
covariance matrix of the cubature points |
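A small worked example, assuming `ckf_transform` is importable alongside numpy: four symmetric cubature points in 2-D give a zero mean and covariance 0.5*I before the noise term is added.
>>> import numpy as np
>>> Xs = np.array([[1., 0.], [0., 1.], [-1., 0.], [0., -1.]])
>>> Q = 0.01 * np.eye(2)
>>> x, P = ckf_transform(Xs, Q)
>>> x.ravel()
array([0., 0.])
>>> np.allclose(P, 0.51 * np.eye(2))
True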
375,649 | def add_team_repo(repo_name, team_name, profile="github", permission=None):
team = get_team(team_name, profile=profile)
if not team:
log.error('Team %s does not exist', team_name)
return False
try:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
team = organization.get_team(team['id'])
repo = organization.get_repo(repo_name)
except UnknownObjectException:
log.exception('Resource not found: %s', team['id'])
return False
params = None
if permission is not None:
params = {'permission': permission}
headers, data = team._requester.requestJsonAndCheck(
"PUT",
team.url + "/repos/" + repo._identity,
input=params
)
list_team_repos(team_name, profile=profile, ignore_cache=True)
return True | Adds a repository to a team with team_name.
repo_name
The name of the repository to add.
team_name
The name of the team of which to add the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
permission
The permission for team members within the repository, can be 'pull',
'push' or 'admin'. If not specified, the default permission specified on
the team will be used.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt myminion github.add_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0 |
375,650 | def PILTowx(pimg):
from MAVProxy.modules.lib.wx_loader import wx
wimg = wx.EmptyImage(pimg.size[0], pimg.size[1])
try:
wimg.SetData(pimg.convert().tobytes())
except NotImplementedError:
wimg.SetData(pimg.convert().tostring())
return wimg | convert a PIL Image to a wx image |
375,651 | def _what_default(self, pronunciation):
token_default = self[][][]
index_count = 2*len(pronunciation) + 1
predictions = {}
for i in range(index_count):
index_predictions = {}
if i % 2 == 0:
index_predictions.update(token_default[])
else:
presented_phoneme = pronunciation[int((i-1)/2)]
index_predictions[presented_phoneme] = token_default[][]
index_predictions[] = token_default[][]
index_predictions[] = token_default[][]
predictions[.format(i)] = index_predictions
return predictions | Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index |
375,652 | def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
if sandbox is not None:
raise NotImplementedError
if stream_id not in self.streams:
raise StreamNotFoundError("Stream with id '{}' not found".format(stream_id))
stream = self.streams[stream_id]
query = stream_id.as_raw()
with switch_db(StreamInstanceModel, 'hyperstream'):
StreamInstanceModel.objects(__raw__=query).delete()
stream.calculated_intervals = TimeIntervals([])
if remove_definition:
with switch_db(StreamDefinitionModel, 'hyperstream'):
StreamDefinitionModel.objects(__raw__=query).delete()
logging.info("Purged stream {}".format(stream_id)) | Purge the stream
:param stream_id: The stream identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox for this stream
:return: None
:raises: NotImplementedError |
375,653 | def is_empty(self):
return all(isinstance(c, ParseNode) and c.is_empty for c in self.children) | Returns True if this node has no children, or if all of its children are ParseNode instances
and are empty. |
375,654 | def join(chord_root, quality='', extensions=None, bass=''):
chord_label = chord_root
if quality or extensions:
chord_label += ":%s" % quality
if extensions:
chord_label += "(%s)" % ",".join(extensions)
if bass and bass != '1':
chord_label += "/%s" % bass
validate_chord_label(chord_label)
return chord_label | r"""Join the parts of a chord into a complete chord label.
Parameters
----------
chord_root : str
Root pitch class of the chord, e.g. 'C', 'Eb'
quality : str
Quality of the chord, e.g. 'maj', 'hdim7'
(Default value = '')
extensions : list
Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
(Default value = None)
bass : str
Scale degree of the bass note, e.g. '5'.
(Default value = '')
Returns
-------
chord_label : str
A complete chord label. |
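A doctest-style illustration matching the parts described above (assuming the literal reconstructions in the snippet and that the labels pass `validate_chord_label`):
>>> join('C', quality='maj', bass='5')
'C:maj/5'
>>> join('Eb', extensions=['9'])
'Eb:(9)'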
375,655 | def get_all_alert(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_alert_with_http_info(**kwargs)
else:
(data) = self.get_all_alert_with_http_info(**kwargs)
return data | Get all alerts for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_alert(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread. |
375,656 | def remove(self, element, multiplicity=None):
_elements = self._elements
if element not in _elements:
raise KeyError
old_multiplicity = _elements.get(element, 0)
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must be not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity | Removes an element from the multiset.
If no multiplicity is specified, the element is completely removed from the multiset:
>>> ms = Multiset('aabbbc')
>>> ms.remove('a')
2
>>> sorted(ms)
['b', 'b', 'b', 'c']
If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
>>> ms.remove('b', 2)
3
>>> sorted(ms)
['b', 'c']
It is not an error to remove more elements than are in the set:
>>> ms.remove('b', 2)
1
>>> sorted(ms)
['c']
This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
Raises:
KeyError: if the element is not contained in the set. Use :meth:`discard` if
you do not want an exception to be raised. |
375,657 | def resolve_memory_access(self, tb, x86_mem_operand):
size = self.__get_memory_access_size(x86_mem_operand)
addr = None
if x86_mem_operand.base:
addr = ReilRegisterOperand(x86_mem_operand.base, size)
if x86_mem_operand.index and x86_mem_operand.scale != 0x0:
index = ReilRegisterOperand(x86_mem_operand.index, size)
scale = ReilImmediateOperand(x86_mem_operand.scale, size)
scaled_index = tb.temporal(size)
tb.add(tb._builder.gen_mul(index, scale, scaled_index))
if addr:
tmp = tb.temporal(size)
tb.add(tb._builder.gen_add(addr, scaled_index, tmp))
addr = tmp
else:
addr = scaled_index
if x86_mem_operand.displacement != 0x0:
disp = ReilImmediateOperand(x86_mem_operand.displacement, size)
if addr:
tmp = tb.temporal(size)
tb.add(tb._builder.gen_add(addr, disp, tmp))
addr = tmp
else:
addr = disp
else:
if not addr:
disp = ReilImmediateOperand(x86_mem_operand.displacement, size)
addr = disp
if x86_mem_operand.segment in ["gs", "fs"]:
seg_base_addr_map = {
"gs": "gs_base_addr",
"fs": "fs_base_addr",
}
seg_base = ReilRegisterOperand(seg_base_addr_map[x86_mem_operand.segment], size)
if addr:
tmp = tb.temporal(size)
tb.add(tb._builder.gen_add(addr, seg_base, tmp))
addr = tmp
else:
addr = seg_base
return addr | Return operand memory access translation. |
375,658 | def change_dir(self, session, path):
if path == "-":
path = self._previous_path or "."
try:
previous = os.getcwd()
os.chdir(path)
except IOError as ex:
session.write_line("Error changing directory: {0}", ex)
else:
self._previous_path = previous
session.write_line(os.getcwd()) | Changes the working directory |
375,659 | def LogoOverlay(sites, overlayfile, overlay, nperline, sitewidth, rmargin, logoheight, barheight, barspacing, fix_limits={}, fixlongname=False, overlay_cmap=None, underlay=False, scalebar=False):
if os.path.splitext(overlayfile)[1] != '.pdf':
raise ValueError("overlayfile must end in .pdf: %s" % overlayfile)
if not overlay_cmap:
(cmap, mapping_d, mapper) = KyteDoolittleColorMapping()
else:
mapper = pylab.cm.ScalarMappable(cmap=overlay_cmap)
cmap = mapper.get_cmap()
pts_per_inch = 72.0
matplotlib.rc(, usetex=True)
matplotlib.rc(, labelsize=8)
matplotlib.rc(, direction=)
matplotlib.rc(, direction=)
matplotlib.rc(, linewidth=0.5)
matplotlib.rc(, size=3)
matplotlib.rc(, size=2.5)
colorbar_bmargin = 20
colorbar_tmargin = 15
nlines = int(math.ceil(len(sites) / float(nperline)))
lmargin = 25
barwidth = nperline * sitewidth
figwidth = lmargin + rmargin + barwidth
figheight = nlines * (logoheight + len(overlay) * (barheight +
barspacing)) + (barheight + colorbar_bmargin + colorbar_tmargin) + (
int(underlay) * len(overlay) * (barheight + barspacing))
fig = pylab.figure(figsize=(figwidth / pts_per_inch, figheight / pts_per_inch))
prop_types = {}
for (prop_d, shortname, longname) in overlay:
if shortname == longname == :
assert all([(isinstance(prop, str) and len(prop) == 1) for
prop in prop_d.values()]),
proptype =
(vmin, vmax) = (0, 1)
propcategories = None
elif all([isinstance(prop, str) for prop in prop_d.values()]):
proptype =
propcategories = list(set(prop_d.values()))
propcategories.sort()
(vmin, vmax) = (0, len(propcategories) - 1)
elif all ([isinstance(prop, (int, float)) for prop in prop_d.values()]):
proptype =
propcategories = None
(vmin, vmax) = (min(prop_d.values()), max(prop_d.values()))
if vmin >= 0 and vmin / float(vmax - vmin) < 0.05:
vmin = 0.0
if 0.9 <= vmax <= 1.0:
vmax = 1.0
else:
raise ValueError("Property %s is neither continuous or discrete. Values are:\n%s" % (shortname, str(prop_d.items())))
if shortname in fix_limits:
(vmin, vmax) = (min(fix_limits[shortname][0]), max(fix_limits[shortname][0]))
assert vmin < vmax, "vmin >= vmax, did you incorrectly use fix_vmin and fix_vmax?"
prop_types[shortname] = (proptype, vmin, vmax, propcategories)
assert len(prop_types) == len(overlay), "Not as many property types as overlays. Did you give the same name (shortname) to multiple properties in the overlay?"
prop_image = {}
for iline in range(nlines):
isites = sites[iline * nperline : min(len(sites), (iline + 1) * nperline)]
xlength = len(isites) * sitewidth
logo_ax = pylab.axes([lmargin / figwidth, ((nlines - iline - 1) * (logoheight + len(overlay) * (barspacing + barheight))) / figheight, xlength / figwidth, logoheight / figheight], frameon=False)
logo_ax.yaxis.set_ticks_position()
logo_ax.xaxis.set_ticks_position()
pylab.yticks([])
pylab.xlim(0.5, len(isites) + 0.5)
pylab.xticks([])
for (iprop, (prop_d, shortname, longname)) in enumerate(overlay):
(proptype, vmin, vmax, propcategories) = prop_types[shortname]
prop_ax = pylab.axes([
lmargin / figwidth,
((nlines - iline - 1) * (logoheight +
len(overlay) * (barspacing + barheight)) +
(1 - int(underlay)) * logoheight + int(underlay) *
barspacing + iprop * (barspacing + barheight))
/ figheight,
xlength / figwidth,
barheight / figheight],
frameon=(proptype != ))
prop_ax.xaxis.set_ticks_position()
pylab.xticks([])
pylab.xlim((0, len(isites)))
pylab.ylim(-0.5, 0.5)
if proptype == :
pylab.yticks([])
prop_ax.yaxis.set_ticks_position()
for (isite, site) in enumerate(isites):
pylab.text(isite + 0.5, -0.5, prop_d[site], size=9,
horizontalalignment=, family=)
continue
pylab.yticks([0], [shortname], size=8)
prop_ax.yaxis.set_ticks_position()
propdata = pylab.zeros(shape=(1, len(isites)))
propdata[ : ] = pylab.nan
for (isite, site) in enumerate(isites):
if site in prop_d:
if proptype == :
propdata[(0, isite)] = prop_d[site]
elif proptype == :
propdata[(0, isite)] = propcategories.index(prop_d[site])
else:
raise ValueError()
prop_image[shortname] = pylab.imshow(propdata, interpolation=, aspect=, extent=[0, len(isites), 0.5, -0.5], cmap=cmap, vmin=vmin, vmax=vmax)
pylab.yticks([0], [shortname], size=8)
ncolorbars = len([p for p in prop_types.values() if p[0] != ])
if scalebar:
ncolorbars += 1
if ncolorbars == 1:
colorbarwidth = 0.4
colorbarspacingwidth = 1.0 - colorbarwidth
elif ncolorbars:
colorbarspacingfrac = 0.5
colorbarwidth = 1.0 / (ncolorbars * (1.0 + colorbarspacingfrac))
colorbarspacingwidth = colorbarwidth * colorbarspacingfrac
ybottom = 1.0 - (colorbar_tmargin + barheight) / figheight
propnames = {}
icolorbar = -1
icolorbarshift = 0
while icolorbar < len(overlay):
if icolorbar == -1:
icolorbar += 1
if scalebar:
(scalebarheight, scalebarlabel) = scalebar
xleft = (colorbarspacingwidth * 0.5 + icolorbar *
(colorbarwidth + colorbarspacingwidth))
ytop = 1 - colorbar_tmargin / figheight
scalebarheightfrac = scalebarheight / figheight
fullfigax = pylab.axes([0, 0, 1, 1], facecolor=(1, 1, 1, 0))
fullfigax.axvline(x=xleft, ymin=ytop - scalebarheightfrac,
ymax=ytop, color=, linewidth=1.5)
pylab.text(xleft + 0.005, ytop - scalebarheightfrac / 2.0,
scalebarlabel, verticalalignment=,
horizontalalignment=,
transform=fullfigax.transAxes)
continue
(prop_d, shortname, longname) = overlay[icolorbar]
icolorbar += 1
(proptype, vmin, vmax, propcategories) = prop_types[shortname]
if proptype == :
icolorbarshift += 1
continue
if shortname == longname or not longname:
propname = shortname
elif fixlongname:
propname = longname
else:
propname = "%s (%s)" % (longname, shortname)
colorbar_ax = pylab.axes([colorbarspacingwidth * 0.5 + (icolorbar - icolorbarshift - int(not bool(scalebar))) * (colorbarwidth + colorbarspacingwidth), ybottom, colorbarwidth, barheight / figheight], frameon=True)
colorbar_ax.xaxis.set_ticks_position()
colorbar_ax.yaxis.set_ticks_position()
pylab.xticks([])
pylab.yticks([])
pylab.title(propname, size=9)
if proptype == :
cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation=)
if -0.1 <= vmin <= 0 and 1.0 <= vmax <= 1.15:
cb.set_ticks([0, 0.5, 1])
cb.set_ticklabels([, , ])
if 4 < (vmax - vmin) <= 11:
fixedticks = [itick for itick in range(int(vmin), int(vmax) + 1)]
cb.set_ticks(fixedticks)
cb.set_ticklabels([str(itick) for itick in fixedticks])
elif proptype == :
cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation=, boundaries=[i for i in range(len(propcategories) + 1)], values=[i for i in range(len(propcategories))])
cb.set_ticks([i + 0.5 for i in range(len(propcategories))])
cb.set_ticklabels(propcategories)
else:
raise ValueError("Invalid proptype")
if shortname in fix_limits:
(ticklocs, ticknames) = fix_limits[shortname]
cb.set_ticks(ticklocs)
cb.set_ticklabels(ticknames)
pylab.savefig(overlayfile, transparent=True) | Makes overlay for *LogoPlot*.
This function creates colored bars overlay bars showing up to two
properties.
The trick of this function is to create the bars the right
size so they align when they overlay the logo plot.
CALLING VARIABLES:
* *sites* : same as the variable of this name used by *LogoPlot*.
* *overlayfile* is a string giving the name of created PDF file containing
the overlay. It must end in the extension ``.pdf``.
* *overlay* : same as the variable of this name used by *LogoPlot*.
* *nperline* : same as the variable of this name used by *LogoPlot*.
* *sitewidth* is the width of each site in points.
* *rmargin* is the right margin in points.
* *logoheight* is the total height of each logo row in points.
* *barheight* is the total height of each bar in points.
* *barspacing* is the vertical spacing between bars in points.
* *fix_limits* has the same meaning of the variable of this name used by *LogoPlot*.
* *fixlongname* has the same meaning of the variable of this name used by *LogoPlot*.
* *overlay_cmap* has the same meaning of the variable of this name used by *LogoPlot*.
* *underlay* is a bool. If `True`, make an underlay rather than an overlay.
* *scalebar: if not `False`, is 2-tuple `(scalebarheight, scalebarlabel)`
where `scalebarheight` is in points. |
375,660 | def _encode_sequence(self, inputs, token_types, valid_length=None):
word_embedding = self.word_embed(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = word_embedding + type_embedding
outputs, additional_outputs = self.encoder(embedding, None, valid_length)
return outputs, additional_outputs | Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model. |
375,661 | def clear(self):
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self) | od.clear() -> None. Remove all items from od. |
375,662 | def well(self, idx) -> Well:
if isinstance(idx, int):
res = self._wells[idx]
elif isinstance(idx, str):
res = self.wells_by_index()[idx]
else:
res = NotImplemented
return res | Deprecated---use result of `wells` or `wells_by_index` |
375,663 | def determine_override_options(selected_options: tuple, override_opts: DictLike, set_of_possible_options: tuple = ()) -> Dict[str, Any]:
override_dict: Dict[str, Any] = {}
for option in override_opts:
if str(option) in list(map(lambda opt: str(opt), selected_options)):
override_dict.update(determine_override_options(selected_options, override_opts[option], set_of_possible_options))
else:
logger.debug(f"override_opts: {override_opts}")
logger.debug(f"Storing override option \"{option}\", with value \"{override_opts[option]}\"")
override_dict[option] = override_opts[option]
else:
logger.debug(f"Found option \"{option}\" as possible option, so skipping!")
return override_dict | Recursively extract the dict described in override_options().
In particular, this searches for selected options in the override_opts dict. It stores only
the override options that are selected.
Args:
selected_options: The options selected for this analysis, in the order defined used
with ``override_options()`` and in the configuration file.
override_opts: dict-like object returned by ruamel.yaml which contains the options that
should be used to override the configuration options.
set_of_possible_options (tuple of enums): Possible options for the override value categories. |
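A self-contained sketch of the recursion (option names such as "2.76" and "trainNumber" are made up for illustration, and this simplified version omits the set_of_possible_options check): keys that match a selected option are descended into and merged, while other keys are stored directly as overrides.

override_opts = {
    "taskName": "myAnalysis",          # plain option -> stored as an override
    "2.76": {"trainNumber": 1234},     # matches a selected option -> recursed into
    "5.02": {"trainNumber": 5678},     # not selected -> stored as-is by this sketch
}
selected_options = ("2.76",)

def extract(selected, opts):
    out = {}
    for key, value in opts.items():
        if str(key) in [str(opt) for opt in selected]:
            out.update(extract(selected, value))   # merge the selected branch
        else:
            out[key] = value                       # keep everything else
    return out

print(extract(selected_options, override_opts))
# {'taskName': 'myAnalysis', 'trainNumber': 1234, '5.02': {'trainNumber': 5678}}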
375,664 | def broadcast_1d_array(arr, ndim, axis=1):
ext_arr = arr
for i in range(ndim - 1):
ext_arr = np.expand_dims(ext_arr, axis=axis)
return ext_arr | Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid. |
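For example, with numpy the singleton axes are added one at a time by `np.expand_dims`, so the resulting shapes are:

>>> import numpy as np
>>> arr = np.array([1.0, 2.0, 3.0])
>>> broadcast_1d_array(arr, 3).shape          # default axis=1
(3, 1, 1)
>>> broadcast_1d_array(arr, 3, axis=0).shape
(1, 1, 3)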
375,665 | def read_tpld_stats(self):
payloads_stats = OrderedDict()
for tpld in self.tplds.values():
payloads_stats[tpld] = tpld.read_stats()
return payloads_stats | :return: dictionary {tpld index {group name {stat name: value}}}.
Sea XenaTpld.stats_captions. |
375,666 | def format_out_of_country_keeping_alpha_chars(numobj, region_calling_from):
num_raw_input = numobj.raw_input
if num_raw_input is None or len(num_raw_input) == 0:
return format_out_of_country_calling_number(numobj, region_calling_from)
country_code = numobj.country_code
if not _has_valid_country_calling_code(country_code):
return num_raw_input
num_raw_input = _normalize_helper(num_raw_input,
_ALL_PLUS_NUMBER_GROUPING_SYMBOLS,
True)
national_number = national_significant_number(numobj)
if len(national_number) > 3:
first_national_number_digit = num_raw_input.find(national_number[:3])
if first_national_number_digit != -1:
num_raw_input = num_raw_input[first_national_number_digit:]
metadata_for_region_calling_from = PhoneMetadata.metadata_for_region(region_calling_from.upper(), None)
if country_code == _NANPA_COUNTRY_CODE:
if is_nanpa_country(region_calling_from):
return unicod(country_code) + U_SPACE + num_raw_input
elif (metadata_for_region_calling_from is not None and
country_code == country_code_for_region(region_calling_from)):
formatting_pattern = _choose_formatting_pattern_for_number(metadata_for_region_calling_from.number_format,
national_number)
if formatting_pattern is None:
return num_raw_input
new_format = _copy_number_format(formatting_pattern)
new_format.pattern = u("(\\d+)(.*)")
new_format.format = u(r"\1\2")
return _format_nsn_using_pattern(num_raw_input,
new_format,
PhoneNumberFormat.NATIONAL)
i18n_prefix_for_formatting = U_EMPTY_STRING
if metadata_for_region_calling_from is not None:
international_prefix = metadata_for_region_calling_from.international_prefix
i18n_match = fullmatch(_SINGLE_INTERNATIONAL_PREFIX, international_prefix)
if i18n_match:
i18n_prefix_for_formatting = international_prefix
else:
i18n_prefix_for_formatting = metadata_for_region_calling_from.preferred_international_prefix
region_code = region_code_for_country_code(country_code)
metadata_for_region = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code)
formatted_number = _maybe_append_formatted_extension(numobj,
metadata_for_region,
PhoneNumberFormat.INTERNATIONAL,
num_raw_input)
if i18n_prefix_for_formatting:
formatted_number = (i18n_prefix_for_formatting + U_SPACE +
unicod(country_code) + U_SPACE + formatted_number)
else:
formatted_number = _prefix_number_with_country_calling_code(country_code,
PhoneNumberFormat.INTERNATIONAL,
formatted_number)
return formatted_number | Formats a phone number for out-of-country dialing purposes.
Note that in this version, if the number was entered originally using
alpha characters and this version of the number is stored in raw_input,
this representation of the number will be used rather than the digit
representation. Grouping information, as specified by characters such as
"-" and " ", will be retained.
Caveats:
- This will not produce good results if the country calling code is both
present in the raw input _and_ is the start of the national
number. This is not a problem in the regions which typically use alpha
numbers.
- This will also not produce good results if the raw input has any
grouping information within the first three digits of the national
number, and if the function needs to strip preceding digits/words in
the raw input before these digits. Normally people group the first
three digits together so this is not a huge problem - and will be fixed
if it proves to be so.
Arguments:
numobj -- The phone number that needs to be formatted.
region_calling_from -- The region where the call is being placed.
Returns the formatted phone number |
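A hedged usage sketch with the python-phonenumbers package (the exact spacing of the result depends on the metadata version, so it is shown only as a comment):

import phonenumbers

# keep_raw_input=True preserves the alpha characters and grouping of the input.
numobj = phonenumbers.parse("1800 SIX-FLAG", "US", keep_raw_input=True)
# Format for dialing from Australia; expected to look roughly like
# '0011 1 800 SIX-FLAG' (Australia's IDD prefix, then the country code).
print(phonenumbers.format_out_of_country_keeping_alpha_chars(numobj, "AU"))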
375,667 | def decrypt(*args, **kwargs):
try:
return legacy_decrypt(*args, **kwargs)
except (NotYetValid, Expired) as e:
raise e
except (Error, ValueError) as e:
return spec_compliant_decrypt(*args, **kwargs) | Decrypts legacy or spec-compliant JOSE token.
First attempts to decrypt the token in a legacy mode
(https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19).
If it is not a valid legacy token then attempts to decrypt it in a
spec-compliant way (http://tools.ietf.org/html/rfc7519) |
375,668 | def stats(self) -> pd.DataFrame:
key = ["icao24", "callsign"] if self.flight_ids is None else "flight_id"
return (
self.data.groupby(key)[["timestamp"]]
.count()
.sort_values("timestamp", ascending=False)
.rename(columns={"timestamp": "count"})
) | Statistics about flights contained in the structure.
Useful for a meaningful representation. |
375,669 | def cross_entropy_reward_loss(logits, actions, rewards, name=None):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name)
return tf.reduce_sum(tf.multiply(cross_entropy, rewards)) | Calculate the loss for Policy Gradient Network.
Parameters
----------
logits : tensor
The network outputs without softmax. This function implements softmax inside.
actions : tensor or placeholder
The agent actions.
rewards : tensor or placeholder
The rewards.
Returns
--------
Tensor
The TensorFlow loss function.
Examples
----------
>>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
>>> network = InputLayer(states_batch_pl, name='input')
>>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
>>> network = DenseLayer(network, n_units=3, name='out')
>>> probs = network.outputs
>>> sampling_prob = tf.nn.softmax(probs)
>>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
>>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
>>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
>>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss) |
375,670 | def call(self, itemMethod):
item = itemMethod.im_self
method = itemMethod.im_func.func_name
return self.batchController.getProcess().addCallback(
CallItemMethod(storepath=item.store.dbdir,
storeid=item.storeID,
method=method).do) | Invoke the given bound item method in the batch process.
Return a Deferred which fires when the method has been invoked. |
375,671 | def mark_validation(institute_id, case_name, variant_id):
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_obj = store.variant(variant_id)
user_obj = store.user(current_user.email)
validate_type = request.form[] or None
link = url_for(, institute_id=institute_id, case_name=case_name,
variant_id=variant_id)
store.validate(institute_obj, case_obj, user_obj, link, variant_obj, validate_type)
return redirect(request.referrer or link) | Mark a variant as sanger validated. |
375,672 | def _get_variant_effect(cls, variants, ref_sequence):
assert len(variants) != 0
var_types = [x.var_type for x in variants]
if len(set(var_types)) != 1:
return , ,
var_type = var_types[0]
assert set([x.ref_name for x in variants]) == set([ref_sequence.id])
codon_starts = [AssemblyVariants._get_codon_start(0, x.ref_start) for x in variants]
assert len(set(codon_starts)) == 1
codon_start = codon_starts[0]
aa_start = codon_start // 3
ref_codon = pyfastaq.sequences.Fasta(, ref_sequence[codon_start:codon_start+3])
ref_aa = ref_codon.translate()
if var_type == pymummer.variant.SNP:
new_codon = list(ref_codon.seq)
for v in variants:
new_codon[v.ref_start - codon_start] = v.qry_base
new_codon = pyfastaq.sequences.Fasta(, .join(new_codon))
qry_aa = new_codon.translate()
if ref_aa.seq == qry_aa.seq:
return (, , aa_start)
elif qry_aa.seq == :
return (, ref_aa.seq + str(aa_start + 1) + , aa_start)
else:
return (, ref_aa.seq + str(aa_start + 1) + qry_aa.seq, aa_start)
elif var_type in [pymummer.variant.INS, pymummer.variant.DEL]:
if len(variants) > 1:
return , , aa_start
var = variants[0]
if var_type == pymummer.variant.INS:
new_seq = pyfastaq.sequences.Fasta(, var.qry_base)
else:
new_seq = pyfastaq.sequences.Fasta(, var.ref_base)
if len(new_seq) % 3 != 0:
return (, ref_aa.seq + str(aa_start + 1) + , aa_start)
new_seq_aa = new_seq.translate()
if in new_seq_aa.seq:
return (, ref_aa.seq + str(aa_start + 1) + , aa_start)
elif var_type == pymummer.variant.INS:
ref_codon_after_ins = pyfastaq.sequences.Fasta(, ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return (, ref_aa.seq + str(aa_start + 1) + + aa_after_ins.seq + str(aa_start + 2) + + new_seq_aa.seq , aa_start)
else:
if len(new_seq) == 3:
return (, ref_aa.seq + str(aa_start + 1) + , aa_start)
else:
assert len(new_seq) % 3 == 0
ref_codon_after_ins = pyfastaq.sequences.Fasta(, ref_sequence[codon_start+3:codon_start+6])
aa_after_ins = ref_codon_after_ins.translate()
return (, ref_aa.seq + str(aa_start + 1)+ + aa_after_ins.seq + str(aa_start + 2) + , aa_start)
else:
return (, , aa_start) | variants = list of variants in the same codon.
returns type of variant (cannot handle more than one indel in the same codon). |
375,673 | def get_groups(self, username):
try:
return self.users[username][]
except Exception as e:
raise UserDoesntExist(username, self.backend_name) | Get a user's groups
:param username: 'key' attribute of the user
:type username: string
:rtype: list of groups |
375,674 | def submit(self, command, blocksize, job_name="parsl.auto"):
if self.provisioned_blocks >= self.max_blocks:
logger.warn("[%s] at capacity, cannot add more blocks now", self.label)
return None
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
job_name = "parsl.{0}.{1}".format(job_name, time.time())
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s", blocksize, self.nodes_per_block,
self.tasks_per_node)
job_config = {}
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["task_blocks"] = self.nodes_per_block * self.tasks_per_node
job_config["nodes_per_block"] = self.nodes_per_block
job_config["tasks_per_node"] = self.tasks_per_node
job_config["walltime"] = self.walltime
job_config["overrides"] = self.overrides
job_config["user_script"] = command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
submit_options =
if self.queue is not None:
submit_options = .format(submit_options, self.queue)
if self.account is not None:
submit_options = .format(submit_options, self.account)
launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
retcode, stdout, stderr = self.channel.execute_wait(launch_cmd, 10)
job_id = None
if retcode == 0:
for line in stdout.split():
if line.strip():
job_id = line.strip()
self.resources[job_id] = {: job_id, : , : blocksize}
else:
message = "Command failed with return code {}".format(launch_cmd, retcode)
if (stdout is not None) and (stderr is not None):
message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
logger.error(message)
return job_id | Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1: this is illegal; tasks_per_node must be a positive integer.
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job |
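A hedged usage sketch; `provider` below stands for an already-configured instance of this provider class (construction arguments omitted):

# Request a single block running a trivial pilot command.
job_id = provider.submit("echo 'hello from the batch node'", 1, job_name="demo")
if job_id is None:
    print("Provider is at capacity; no block was provisioned.")
else:
    print("Submitted job:", job_id)   # the job id is also keyed in provider.resources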
375,675 | def _init_scratch(self):
scratch = np.zeros((self._num_shards, self._shard_size),
dtype=np.complex64)
scratch_handle = mem_manager.SharedMemManager.create_array(
scratch.view(dtype=np.float32))
self._shared_mem_dict[] = scratch_handle | Initializes a scratch pad equal in size to the wavefunction. |
375,676 | def SetColor(self, color):
self.SetFillColor(color)
self.SetLineColor(color)
self.SetMarkerColor(color) | *color* may be any color understood by ROOT or matplotlib.
Set all color attributes with one method call.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`. |
375,677 | def alias_field(model, field):
for part in field.split(LOOKUP_SEP)[:-1]:
model = associate_model(model,part)
return model.__name__ + "-" + field.split(LOOKUP_SEP)[-1] | Return the prefix name of a field |
375,678 | def get_contingency_tables(self):
return np.array([ContingencyTable(*ct) for ct in self.contingency_tables.values]) | Create an Array of ContingencyTable objects for each probability threshold.
Returns:
Array of ContingencyTable objects |
375,679 | def driver_name(self):
self._driver_name, value = self.get_attr_string(self._driver_name, )
return value | Returns the name of the motor driver that loaded this device. See the list
of [supported devices] for a list of drivers. |
375,680 | def _hash_categorical(c, encoding, hash_key):
values = np.asarray(c.categories.values)
hashed = hash_array(values, encoding, hash_key,
categorize=False)
mask = c.isna()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype=)
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result | Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array, same size as len(c) |
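The categories-then-take trick can be shown without pandas internals (the toy hash below is for illustration only; the real function uses hash_array with a hash key):

import numpy as np

categories = np.array(["apple", "banana", "cherry"])
codes = np.array([0, 2, 1, 0, -1])          # -1 marks a missing value

# Hash each *category* once ...
hashed_categories = np.array(
    [hash(c) & 0xFFFFFFFFFFFFFFFF for c in categories], dtype="uint64")
# ... then map the per-element codes onto those hashes.
result = hashed_categories.take(codes)
# Missing values get the uint64 sentinel, as in the function above.
result[codes == -1] = np.iinfo(np.uint64).max
print(result)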
375,681 | def _fs_match(pattern, filename, sep, follow, symlinks):
matched = False
base = None
m = pattern.fullmatch(filename)
if m:
matched = True
if not follow:
groups = m.groups()
last = len(groups)
for i, star in enumerate(m.groups(), 1):
if star:
parts = star.strip(sep).split(sep)
if base is None:
base = filename[:m.start(i)]
for part in parts:
base = os.path.join(base, part)
is_link = symlinks.get(base, None)
if is_link is not None:
matched = not is_link
elif i != last or os.path.isdir(base):
is_link = os.path.islink(base)
symlinks[base] = is_link
matched = not is_link
if not matched:
break
if matched:
break
return matched | Match path against the pattern.
Since `globstar` doesn't match symlinks (unless `FOLLOW` is enabled), we must look for symlinks.
If we identify a symlink in a `globstar` match, we know this result should not actually match. |
375,682 | def commit(
self,
confirm=False,
confirm_delay=None,
check=False,
comment="",
and_quit=False,
delay_factor=1,
):
delay_factor = self.select_delay_factor(delay_factor)
if check and (confirm or confirm_delay or comment):
raise ValueError("Invalid arguments supplied with commit check")
if confirm_delay and not confirm:
raise ValueError(
"Invalid arguments supplied to commit method both confirm and check"
)
command_string = "commit"
commit_marker = "Commit complete."
if check:
command_string = "commit check"
commit_marker = "Validation complete"
elif confirm:
if confirm_delay:
command_string = "commit confirmed " + str(confirm_delay)
else:
command_string = "commit confirmed"
commit_marker = "commit confirmed will be automatically rolled back in"
if comment:
if in comment:
raise ValueError("Invalid comment contains double quote")
comment = .format(comment)
command_string += " comment " + comment
if and_quit:
command_string += " and-quit"
output = self.config_mode()
if and_quit:
prompt = self.base_prompt
output += self.send_command_expect(
command_string,
expect_string=prompt,
strip_prompt=True,
strip_command=True,
delay_factor=delay_factor,
)
else:
output += self.send_command_expect(
command_string,
strip_prompt=True,
strip_command=True,
delay_factor=delay_factor,
)
if commit_marker not in output:
raise ValueError(
"Commit failed with the following errors:\n\n{0}".format(output)
)
return output | Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
Automatically enters configuration mode
default:
command_string = commit
check and (confirm or confirm_delay or comment):
Exception
confirm_delay and no confirm:
Exception
confirm:
confirm_delay option
comment option
command_string = commit confirmed or commit confirmed <confirm_delay>
check:
command_string = commit check |
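A hedged usage sketch with Netmiko (host and credentials are placeholders):

from netmiko import ConnectHandler

conn = ConnectHandler(device_type="juniper_junos", host="192.0.2.10",
                      username="admin", password="secret")
conn.send_config_set(["set system host-name lab-router"])
conn.commit(check=True)                                   # 'commit check' only
conn.commit(confirm=True, confirm_delay=5,
            comment="hostname change")                    # auto-rollback in 5 minutes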
375,683 | def apply_integer_offsets(image2d, offx, offy):
if type(offx) != int or type(offy) != int:
raise ValueError()
naxis2, naxis1 = image2d.shape
image2d_shifted = np.zeros((naxis2, naxis1))
non = lambda s: s if s < 0 else None
mom = lambda s: max(0,s)
image2d_shifted[mom(offy):non(offy), mom(offx):non(offx)] = \
image2d[mom(-offy):non(-offy), mom(-offx):non(-offx)]
return image2d_shifted | Apply global (integer) offsets to image.
Parameters
----------
image2d : numpy array
Input image
offx : int
Offset in the X direction (must be integer).
offy : int
Offset in the Y direction (must be integer).
Returns
-------
image2d_shifted : numpy array
Shifted image |
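For example, shifting a 3x3 image one pixel in each direction zero-fills the leading row and column:

>>> import numpy as np
>>> image = np.arange(9).reshape(3, 3)
>>> apply_integer_offsets(image, offx=1, offy=1)
array([[0., 0., 0.],
       [0., 0., 1.],
       [0., 3., 4.]])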
375,684 | def wait_and_ignore(condition, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):
try:
return wait_until(condition, timeout, sleep)
except:
pass | Waits wrapper that'll wait for the condition to become true, but will
not error if the condition isn't met.
Args:
condition (lambda) - Lambda expression to wait for to evaluate to True.
Kwargs:
timeout (number) : Maximum number of seconds to wait.
sleep (number) : Sleep time to wait between iterations.
Example::
wait_and_ignore(lambda: driver.find_element_by_id("success").is_displayed(),
timeout=30,
sleep=0.5)
is equivalent to::
end_time = datetime.now() + timedelta(seconds=30)
while datetime.now() < end_time:
try:
if driver.find_element_by_id("success").is_displayed():
break;
except:
pass
time.sleep(0.5) |
375,685 | def visit_Expr(self, node: AST, dfltChaining: bool = True) -> str:
return self.visit(node.value) | Return representation of nested expression. |
375,686 | def _dict_native_ok(d):
if len(d) >= 256:
return False
for k in d:
if not isinstance(k, six.string_types):
return False
return True | This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled. |
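For example, small dictionaries with string keys can be written as native HDF5 groups, while any non-string key forces pickling:

>>> _dict_native_ok({"a": 1, "b": [1, 2, 3]})
True
>>> _dict_native_ok({1: "non-string key"})
False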
375,687 | def wait(self, condition, interval, *args):
hid = lambda: + str(uuid.uuid1())[:8]
handle = hid()
if len(args):
element_handle = hid()
self.browser.execute_script(
.format(element_handle)
)
for el in args:
if isinstance(el, string_types):
self.browser.execute_script(.format(
element_handle, % el))
else:
self.browser.execute_script(
.format(element_handle), el)
if len(args) == 1:
condition = condition.replace(, .format(
element_handle, if isinstance(args[0], string_types) else ))
else:
regex = r
results = re.findall(regex, condition)
for result in results:
pos = eval(result[1])
if pos + 1 <= len(args):
condition = condition.replace(result[0], .format(
element_handle, pos, if isinstance(args[pos], string_types) else ))
self.browser.execute_script(
% (handle, condition, handle, handle, \
element_handle, interval))
else:
self.browser.execute_script(
% (
handle, condition, handle, handle, interval))
return handle | :Description: Create an interval in vm.window, will clear interval after condition met.
:param condition: Condition in javascript to pass to interval.
:example: '$el.innerText == "cheesecake"'
:example: '$el[0].disabled && $el[1].disabled'
:type condition: string
:param interval: Time in milliseconds to execute interval.
:type interval: int or float
:param *args: WebElement or selector of condition element.
:type *args: tuple
:return: string |
375,688 | def get_class_name(class_key, classification_key):
classification = definition(classification_key)
for the_class in classification[]:
if the_class.get() == class_key:
return the_class.get(, class_key)
return class_key | Helper to get class name from a class_key of a classification.
:param class_key: The key of the class.
:type class_key: str
:type classification_key: The key of a classification.
:param classification_key: str
:returns: The name of the class.
:rtype: str |
375,689 | def build_target_areas(entry):
target_areas = []
areas = str(entry[]).split()
for area in areas:
target_areas.append(area.strip())
return target_areas | Cleanup the raw target areas description string |
375,690 | def uavionix_adsb_out_cfg_encode(self, ICAO, callsign, emitterType, aircraftSize, gpsOffsetLat, gpsOffsetLon, stallSpeed, rfSelect):
return MAVLink_uavionix_adsb_out_cfg_message(ICAO, callsign, emitterType, aircraftSize, gpsOffsetLat, gpsOffsetLon, stallSpeed, rfSelect) | Static data to configure the ADS-B transponder (send within 10 sec of
a POR and every 10 sec thereafter)
ICAO : Vehicle address (24 bit) (uint32_t)
callsign : Vehicle identifier (8 characters, null terminated, valid characters are A-Z, 0-9, " " only) (char)
emitterType : Transmitting vehicle type. See ADSB_EMITTER_TYPE enum (uint8_t)
aircraftSize : Aircraft length and width encoding (table 2-35 of DO-282B) (uint8_t)
gpsOffsetLat : GPS antenna lateral offset (table 2-36 of DO-282B) (uint8_t)
gpsOffsetLon : GPS antenna longitudinal offset from nose [if non-zero, take position (in meters) divide by 2 and add one] (table 2-37 DO-282B) (uint8_t)
stallSpeed : Aircraft stall speed in cm/s (uint16_t)
rfSelect : ADS-B transponder reciever and transmit enable flags (uint8_t) |
375,691 | def _read_journal(self):
root = self._filesystem.inspect_get_roots()[0]
inode = self._filesystem.stat()[]
with NamedTemporaryFile(buffering=0) as tempfile:
self._filesystem.download_inode(root, inode, tempfile.name)
journal = usn_journal(tempfile.name)
return parse_journal(journal) | Extracts the USN journal from the disk and parses its content. |
375,692 | def set(self, subscribed, ignored):
sub = {: subscribed, : ignored}
json = self._json(self._put(self._api, data=dumps(sub)), 200)
self.__init__(json, self._session) | Set the user's subscription for this subscription
:param bool subscribed: (required), determines if notifications should
be received from this thread.
:param bool ignored: (required), determines if notifications should be
ignored from this thread. |
375,693 | def hourly_horizontal_infrared(self):
sky_cover = self._sky_condition.hourly_sky_cover
db_temp = self._dry_bulb_condition.hourly_values
dp_temp = self._humidity_condition.hourly_dew_point_values(
self._dry_bulb_condition)
horiz_ir = []
for i in xrange(len(sky_cover)):
horiz_ir.append(
calc_horizontal_infrared(sky_cover[i], db_temp[i], dp_temp[i]))
return self._get_daily_data_collections(
energyflux.HorizontalInfraredRadiationIntensity(), , horiz_ir) | A data collection containing hourly horizontal infrared intensity in W/m2. |
375,694 | def sign(self):
return -1 if self.type in (Account.TYPES.asset, Account.TYPES.expense) else 1 | Returns 1 if a credit should increase the value of the
account, or -1 if a credit should decrease the value of the
account.
This is based on the account type as is standard accounting practice.
The signs can be derrived from the following expanded form of the
accounting equation:
Assets = Liabilities + Equity + (Income - Expenses)
Which can be rearranged as:
0 = Liabilities + Equity + Income - Expenses - Assets
Further details here: https://en.wikipedia.org/wiki/Debits_and_credits |
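A worked example of the convention (a plain-Python sketch, not the library's Account model): the same 100-unit credit decreases an asset balance but increases an income balance.

# Restating the rule above: asset and expense accounts get -1, all others +1.
SIGNS = {"asset": -1, "expense": -1, "liability": 1, "equity": 1, "income": 1}

credit_amount = 100
for account_type in ("asset", "income"):
    print(account_type, "changes by", SIGNS[account_type] * credit_amount)
# asset changes by -100
# income changes by 100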
375,695 | def dtype(self):
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value] | Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'> |
375,696 | def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
random_part = u.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(lg - len(prefix or "") - 1)
)
if prefix is not None:
return u % (prefix, random_part)
else:
return random_part | Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode |
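A self-contained sketch of the same idea (the 'prefix-random' layout and the default length are assumptions, since the joining format string is stripped from the code above):

import random
import string

def gen_ticket_sketch(prefix="ST", lg=29):
    alphabet = string.ascii_letters + string.digits
    # Leave room for the prefix and a single separator character.
    random_part = "".join(random.choice(alphabet) for _ in range(lg - len(prefix) - 1))
    return "%s-%s" % (prefix, random_part)   # assumed 'prefix-random_part' layout

print(gen_ticket_sketch())   # e.g. 'ST-aB3...' with a total length of 29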
375,697 | def block_splitter(data, block_size):
buf = []
for i, datum in enumerate(data):
buf.append(datum)
if len(buf) == block_size:
yield buf
buf = []
if buf:
yield buf | Creates a generator by slicing ``data`` into chunks of ``block_size``.
>>> data = range(10)
>>> list(block_splitter(data, 2))
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
If ``data`` cannot be evenly divided by ``block_size``, the last block will
simply be the remainder of the data. Example:
>>> data = range(10)
>>> list(block_splitter(data, 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
If the ``block_size`` is greater than the total length of ``data``, a
single block will be generated:
>>> data = range(3)
>>> list(block_splitter(data, 4))
[[0, 1, 2]]
:param data:
Any iterable. If ``data`` is a generator, it will be exhausted,
obviously.
:param int block_site:
Desired (maximum) block size. |
375,698 | def getmakeidfobject(idf, key, name):
idfobject = idf.getobject(key, name)
if not idfobject:
return idf.newidfobject(key, Name=name)
else:
return idfobject | get idfobject or make it if it does not exist |
375,699 | def has_permissions(**perms):
def predicate(ctx):
ch = ctx.channel
permissions = ch.permissions_for(ctx.author)
missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value]
if not missing:
return True
raise MissingPermissions(missing)
return check(predicate) | A :func:`.check` that is added that checks if the member has all of
the permissions necessary.
The permissions passed in must be exactly like the properties shown under
:class:`.discord.Permissions`.
This check raises a special exception, :exc:`.MissingPermissions`
that is inherited from :exc:`.CheckFailure`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python3
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test(ctx):
await ctx.send('You can manage messages.') |