Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
13,000 | def pwgen(length=None):
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0OI1']  # exclusion set of ambiguous characters assumed
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars)) | Generate a random password. |
13,001 | def set_urlroute_rules(rules=None):
global __url_route_rules__
__url_route_rules__ = []
for k, v in (rules or {}).items():
__url_route_rules__.append((re.compile(k), v)) | rules should be (pattern, replace)
e.g.: ('/admin', '/demo') |
13,002 | def fcs(bits):
fcs = FCS()
for bit in bits:
yield bit
fcs.update_bit(bit)
digest = bitarray(endian="little")
digest.frombytes(fcs.digest())
for bit in digest:
yield bit | Append running bitwise FCS CRC checksum to end of generator |
13,003 | def getBoundsColor(self, nNumOutputColors, flCollisionBoundsFadeDistance):
fn = self.function_table.getBoundsColor
pOutputColorArray = HmdColor_t()
pOutputCameraColor = HmdColor_t()
fn(byref(pOutputColorArray), nNumOutputColors, flCollisionBoundsFadeDistance, byref(pOutputCameraColor))
return pOutputColorArray, pOutputCameraColor | Get the current chaperone bounds draw color and brightness |
13,004 | def query_tag_values(self, metric_type=None, **tags):
tagql = self._transform_tags(**tags)
return self._get(self._get_metrics_tags_url(self._get_url(metric_type)) + .format(tagql)) | Query for possible tag values.
:param metric_type: A MetricType to be queried. If left to None, matches all the MetricTypes
:param tags: A dict of tag key/value pairs. Uses Hawkular-Metrics tag query language for syntax |
13,005 | def from_exception(cls, exc):
result = cls(
condition=exc.condition,
type_=exc.TYPE,
text=exc.text
)
result.application_condition = exc.application_defined_condition
return result | Construct a new :class:`Error` payload from the attributes of the
exception.
:param exc: The exception to convert
:type exc: :class:`aioxmpp.errors.XMPPError`
:result: Newly constructed error payload
:rtype: :class:`Error`
.. versionchanged:: 0.10
The :attr:`aioxmpp.XMPPError.application_defined_condition` is now
taken over into the result. |
13,006 | def _get_tensor_like_attributes():
attrs = dict()
attrs.update((attr, _wrap_method(tf.Tensor, attr))
for attr in tf.Tensor.OVERLOADABLE_OPERATORS.union({}))
attrs.update((attr, getattr(tf.Tensor, attr))
for attr in {, , })
return attrs | Returns `Tensor` attributes related to shape and Python builtins. |
13,007 | def create_conversation(self, body, recipients, attachment_ids=None, context_code=None, filter=None, filter_mode=None, group_conversation=None, media_comment_id=None, media_comment_type=None, mode=None, scope=None, subject=None, user_note=None):
path = {}
data = {}
params = {}
data["recipients"] = recipients
if subject is not None:
data["subject"] = subject
data["body"] = body
if group_conversation is not None:
data["group_conversation"] = group_conversation
if attachment_ids is not None:
data["attachment_ids"] = attachment_ids
if media_comment_id is not None:
data["media_comment_id"] = media_comment_id
if media_comment_type is not None:
self._validate_enum(media_comment_type, ["audio", "video"])
data["media_comment_type"] = media_comment_type
if user_note is not None:
data["user_note"] = user_note
if mode is not None:
self._validate_enum(mode, ["sync", "async"])
data["mode"] = mode
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
data["scope"] = scope
if filter is not None:
data["filter"] = filter
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
data["filter_mode"] = filter_mode
if context_code is not None:
data["context_code"] = context_code
self.logger.debug("POST /api/v1/conversations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/conversations".format(**path), data=data, params=params, no_data=True) | Create a conversation.
Create a new conversation with one or more recipients. If there is already
an existing private conversation with the given recipients, it will be
reused. |
13,008 | def open_editor(self, data=):
with NamedTemporaryFile(prefix=, suffix=, delete=False) as fp:
filepath = fp.name
with codecs.open(filepath, 'w', 'utf-8') as fp:  # encoding assumed
fp.write(data)
_logger.info(, filepath)
editor = (os.getenv() or
os.getenv() or
os.getenv() or
)
command = shlex.split(editor) + [filepath]
try:
with self.suspend():
_logger.debug(, command)
p = subprocess.Popen(command)
try:
p.communicate()
except KeyboardInterrupt:
p.terminate()
except OSError as e:
_logger.exception(e)
self.show_notification( % editor)
with codecs.open(filepath, 'r', 'utf-8') as fp:  # encoding assumed
text = fp.read()
text = self.strip_instructions(text)
try:
yield text
except exceptions.TemporaryFileError:
_logger.info()
self.show_notification( % filepath)
else:
try:
os.remove(filepath)
except OSError:
_logger.warning(, filepath)
else:
_logger.info(, filepath) | Open a file for editing using the system's default editor.
After the file has been altered, the text will be read back and the
HTML comment tag <!--INSTRUCTIONS --> will be stripped. If an error
occurs inside of the context manager, the file will be preserved so
users can recover their data. Otherwise, the file will be deleted when
the context manager closes.
Params:
data (str): If provided, text will be written to the file before
opening it with the editor.
Returns:
text (str): The text that the user entered into the editor. |
13,009 | def switch_onoff(self, device, status):
if status == 1 or status == True or status == 'on':  # 'on' value assumed
return self.switch_on(device)
else:
return self.switch_off(device) | Switch a Socket |
13,010 | def _replace_element_by_own_content(self, element):
if element.has_children_elements():
children = element.get_children_elements()
for child in children:
element.insert_before(child)
element.remove_node()
elif element.has_children():
element.replace_node(element.get_first_node_child()) | Replace the element by its own text content.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement |
13,011 | def display(self):
for line in self.adb_shell().splitlines():
m = _DISPLAY_RE.search(line, 0)
if not m:
continue
w = int(m.group('width'))  # group and key names assumed
h = int(m.group('height'))
return collections.namedtuple('Display', ['width', 'height'])(w, h)
else:
w, h = self.info['displayWidth'], self.info['displayHeight']
return collections.namedtuple('Display', ['width', 'height'])(w, h) | Virtual keyboard may get a small d.info['displayHeight'] |
13,012 | def agent_intents(self):
endpoint = self._intent_uri()
intents = self._get(endpoint)
if isinstance(intents, dict):
raise Exception(intents["status"])
return [Intent(intent_json=i) for i in intents] | Returns a list of intent json objects |
13,013 | def decrypt(self, ciphertext):
if not isinstance(ciphertext, str):
raise CiphertextTypeError("Input ciphertext is not of type string")
plaintext_length = self.getPlaintextLen(ciphertext)
ciphertext_length = self.getCiphertextLen(ciphertext)
ciphertext_complete = (len(ciphertext) >= ciphertext_length)
if ciphertext_complete is False:
raise RecoverableDecryptionError('Incomplete ciphertext: ' + str(len(ciphertext)) + ' of ' + str(ciphertext_length) + ' bytes')  # message wording assumed
ciphertext = ciphertext[:ciphertext_length]
W1_start = 0
W1_end = AES.block_size
W1 = ciphertext[W1_start:W1_end]
W2_start = AES.block_size
W2_end = AES.block_size + plaintext_length
W2 = ciphertext[W2_start:W2_end]
T_start = AES.block_size + plaintext_length
T_end = AES.block_size + plaintext_length + Encrypter._MAC_LENGTH
T_expected = ciphertext[T_start:T_end]
mac = HMAC.new(self.K2, W1 + W2, SHA512)
T_actual = mac.digest()[:Encrypter._MAC_LENGTH]
if T_expected != T_actual:
raise UnrecoverableDecryptionError()
iv2_bytes = + self._ecb_enc_K1.decrypt(W1)[1:8]
counter_val = fte.bit_ops.bytes_to_long(iv2_bytes)
counter_length_in_bits = AES.block_size * 8
counter = Counter.new(
counter_length_in_bits, initial_value=counter_val)
ctr_enc = AES.new(key=self.K1,
mode=AES.MODE_CTR,
IV= * 8 + iv2_bytes,
counter=counter)
plaintext = ctr_enc.decrypt(W2)
return plaintext | Given ``ciphertext`` returns a ``plaintext`` decrypted using the keys specified in ``__init__``.
Raises ``CiphertextTypeError`` if the input ``ciphertext`` is not a string.
Raises ``RecoverableDecryptionError`` if the input ``ciphertext`` has a non-negative message length greater than the ciphertext length.
Raises ``UnrecoverableDecryptionError`` if invalid padding is detected, or the MAC is invalid. |
13,014 | def get_campaign_name_list(self):
campaigns = self.find(, {})
campaign_names = []
for campaign in campaigns:
if 'name' in campaign:  # key name assumed
campaign_names.append(campaign['name'])
return campaign_names | Returns a list of all valid campaign names
Returns:
List of strings containing all valid campaign names |
13,015 | def _insert_defaults(self):
merged = merge_defaults(self.structure, self)
self.update(merged) | Inserts default values from :attr:`StructuredDictMixin.structure`
to `self` by merging the two structures
(see :func:`monk.manipulation.merge_defaults`). |
13,016 | def download(url, target, headers=None, trackers=()):
if headers is None:
headers = {}
headers.setdefault('user-agent', 'requests_download/' + __version__)  # header name/value assumed
r = requests.get(url, headers=headers, stream=True)
r.raise_for_status()
for t in trackers:
t.on_start(r)
with open(target, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
for t in trackers:
t.on_chunk(chunk)
for t in trackers:
t.on_finish() | Download a file using requests.
This is like urllib.request.urlretrieve, but:
- requests validates SSL certificates by default
- you can pass tracker objects to e.g. display a progress bar or calculate
a file hash. |
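The docstring above mentions tracker objects without showing one; here is a minimal sketch of such a tracker, assuming only the `on_start`/`on_chunk`/`on_finish` hooks that the function body calls (the class name and behaviour are illustrative).

```python
# Minimal tracker sketch for the download() row above; hook names are
# taken from the function body, the rest is an assumption.
class ByteCountTracker:
    def __init__(self):
        self.total = 0

    def on_start(self, response):
        # response is the requests.Response object passed in by download()
        self.total = 0

    def on_chunk(self, chunk):
        self.total += len(chunk)

    def on_finish(self):
        print("downloaded", self.total, "bytes")

# hypothetical usage:
# download("https://example.com/file.bin", "file.bin", trackers=(ByteCountTracker(),))
```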
13,017 | def _write_max_gradient(self)->None:
"Writes the maximum of the gradients to Tensorboard."
max_gradient = max(x.data.max() for x in self.gradients)
self._add_gradient_scalar('max_gradient', scalar_value=max_gradient) | Writes the maximum of the gradients to Tensorboard. |
13,018 | def to_json(self, minimal=True):
if minimal:
return json.dumps(self.json_repr(minimal=True), cls=MarathonMinimalJsonEncoder, sort_keys=True)
else:
return json.dumps(self.json_repr(), cls=MarathonJsonEncoder, sort_keys=True) | Encode an object as a JSON string.
:param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
:rtype: str |
13,019 | def present(email, profile="splunk", **kwargs):
Example [email protected]
name = kwargs.get()
ret = {
: name,
: {},
: None,
:
}
target = __salt__[](email, profile=profile, user_details=True)
if not target:
if __opts__[]:
ret[] = .format(name)
return ret
result = __salt__[](
email, profile=profile, **kwargs
)
if result:
ret[].setdefault(, None)
ret[].setdefault(, .format(name))
ret[] = True
else:
ret[] = False
ret[] = .format(name)
return ret
else:
ret[] = .format(name)
if __opts__[]:
ret[] = None
return ret
result = __salt__[](
email, profile, **kwargs
)
if isinstance(result, bool) and result:
ret[] = None
ret[] = "No changes"
else:
diff = {}
for field in [, , , , , ]:
if field == :
diff[] = list(set(target.get(field, [])).symmetric_difference(set(result.get(field, []))))
elif target.get(field) != result.get(field):
diff[field] = result.get(field)
newvalues = result
ret[] = True
ret[][] = diff
ret[][] = target
ret[][] = newvalues
return ret | Ensure a user is present
.. code-block:: yaml
ensure example test user 1:
splunk.user_present:
- realname: 'Example TestUser1'
- name: 'exampleuser'
- email: '[email protected]'
- roles: ['user']
The following parameters are required:
email
This is the email of the user in splunk |
13,020 | def _write_header(f, version, flags, stream_id, opcode, length):
pack = v3_header_pack if version >= 3 else header_pack
f.write(pack(version, flags, stream_id, opcode))
write_int(f, length) | Write a CQL protocol frame header. |
13,021 | def pop_data_point(self, n):
popped = []
for k in self.ckeys:
data = list(self.c(k))
popped.append(data.pop(n))
self.insert_column(_n.array(data), k)
return popped | This will remove and return the n'th data point (starting at 0) from
all columns.
Parameters
----------
n
Index of data point to pop. |
13,022 | def _find_step_node(self, step_text):
for func, decorator in self._iter_step_func_decorators():
step = self._step_decorator_args(decorator)
arg_node = decorator.call.value[0].value
if step == step_text:
return arg_node, func
elif isinstance(step, list) and step_text in step:
step_node = arg_node[step.index(step_text)]
return step_node, func
return None, None | Find the ast node which contains the text. |
13,023 | async def get_info(self):
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key  # key name assumed
rqst = Request(self.session,
, .format(self.kernel_id),
params=params)
async with rqst.fetch() as resp:
return await resp.json() | Retrieves a brief information about the compute session. |
13,024 | def create_cmdclass(develop_wrappers=None, distribute_wrappers=None, data_dirs=None):
develop_wrappers = develop_wrappers or []
distribute_wrappers = distribute_wrappers or []
data_dirs = data_dirs or []
develop_wrapper = functools.partial(wrap_command, develop_wrappers, data_dirs)
distribute_wrapper = functools.partial(wrap_command, distribute_wrappers, data_dirs)
cmdclass = dict(
develop=develop_wrapper(develop, strict=True),
sdist=distribute_wrapper(sdist, strict=True),
bdist_egg=bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled
)
if bdist_wheel:
cmdclass['bdist_wheel'] = bdist_wheel
return cmdclass | Create a command class with the given optional wrappers.
Parameters
----------
develop_wrappers: list(str), optional
The cmdclass names to run before running other commands
distribute_wrappers: list(str), optional
The cmdclass names to run before running other commands
data_dirs: list(str), optional.
The directories containing static data. |
13,025 | def post_customer_preferences(self, **kwargs):
kwargs['_return_http_data_only'] = True  # key name assumed from generated-client convention
if kwargs.get('async_req'):
return self.post_customer_preferences_with_http_info(**kwargs)
else:
(data) = self.post_customer_preferences_with_http_info(**kwargs)
return data | Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread. |
13,026 | def is_rpm_installed():
try:
version_result = subprocess.run(["rpm", "--usage"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
rpm_installed = not version_result.returncode
except FileNotFoundError:
rpm_installed = False
return rpm_installed | Tests if the rpm command is present. |
13,027 | async def create(
cls, name: str, architecture: str, content: io.IOBase, *,
title: str = "",
filetype: BootResourceFileType = BootResourceFileType.TGZ,
chunk_size=(1 << 22), progress_callback=None):
if '/' not in name:
raise ValueError(
"name must be in format os/release; missing '/'")
if '/' not in architecture:
raise ValueError(
"architecture must be in format arch/subarch; missing '/'")
if not content.readable():
raise ValueError("content must be readable")
elif not content.seekable():
raise ValueError("content must be seekable")
if chunk_size <= 0:
raise ValueError(
"chunk_size must be greater than 0, not %d" % chunk_size)
size, sha256 = calc_size_and_sha265(content, chunk_size)
resource = cls._object(await cls._handler.create(
name=name, architecture=architecture, title=title,
filetype=filetype.value, size=str(size), sha256=sha256))
newest_set = max(resource.sets, default=None)
assert newest_set is not None
resource_set = resource.sets[newest_set]
assert len(resource_set.files) == 1
rfile = list(resource_set.files.values())[0]
if rfile.complete:
return resource
else:
await cls._upload_chunks(
rfile, content, chunk_size, progress_callback)
return cls._object.read(resource.id) | Create a `BootResource`.
Creates an uploaded boot resource with `content`. The `content` is
uploaded in chunks of `chunk_size`. `content` must be seekable as the
first pass through the `content` will calculate the size and sha256
value then the second pass will perform the actual upload.
:param name: Name of the boot resource. Must be in format 'os/release'.
:type name: `str`
:param architecture: Architecture of the boot resource. Must be in
format 'arch/subarch'.
:type architecture: `str`
:param content: Content of the boot resource.
:type content: `io.IOBase`
:param title: Title of the boot resource.
:type title: `str`
:param filetype: Type of file in content.
:type filetype: `str`
:param chunk_size: Size in bytes to upload to MAAS in chunks.
(Default is 4 MiB).
:type chunk_size: `int`
:param progress_callback: Called to inform the current progress of the
upload. One argument is passed with the progress as a percentage.
If the resource was already complete and no content
needed to be uploaded then this callback will never be called.
:type progress_callback: Callable
:returns: Created boot resource.
:rtype: `BootResource`. |
13,028 | def _format_lat(self, lat):
if self.ppd in [4, 16, 64, 128]:
return None
else:
if lat < 0:
return map(lambda x: "{0:0>2}"
.format(int(np.abs(x))) + 'S', self._map_center('lat', lat))  # 'S'/'N' suffixes and 'lat' key assumed
else:
return map(lambda x: "{0:0>2}"
.format(int(x)) + 'N', self._map_center('lat', lat)) | Return a formatted latitude string for the file. |
13,029 | def set_conn(self, **kwargs):
log.setLevel(kwargs.get('log_level', self.log_level))  # key name assumed
conn_name = kwargs.get("name")
if not conn_name:
raise NameError("a connection requires a 'name': %s" % kwargs)
elif self.conns.get(conn_name):
raise KeyError("connection '%s' has already been set" % conn_name)
if not kwargs.get("active", True):
log.warning("Connection '%s' is set as inactive" % conn_name)
return
conn_type = kwargs.get("conn_type")
if not conn_type or conn_type not in self.conn_mapping.nested:
err_msg = ["a connection requires a valid 'conn_type':\n",
"%s"]
raise NameError("".join(err_msg) % (list(self.conn_mapping.nested)))
log.info("Setting connection '%s'", conn_name)
if conn_type == "triplestore":
conn = make_tstore_conn(kwargs)
else:
conn = RdfwConnections[conn_type][kwargs[]](**kwargs)
self.conns[conn_name] = conn
self.__is_initialized__ = True | takes a connection and creates the connection |
13,030 | def message(msg, *args):
clear_progress()
text = (msg % args)
sys.stdout.write(text + '\n') | Program message output. |
13,031 | def check_log_files_and_publish_updates(self):
anything_published = False
for file_info in self.open_file_infos:
assert not file_info.file_handle.closed
lines_to_publish = []
max_num_lines_to_read = 100
for _ in range(max_num_lines_to_read):
next_line = file_info.file_handle.readline()
if next_line == "":
break
if next_line[-1] == "\n":
next_line = next_line[:-1]
lines_to_publish.append(next_line)
filename = file_info.filename.split("/")[-1]
is_worker = (filename.startswith("worker")
and (filename.endswith("out")
or filename.endswith("err")))
if is_worker and file_info.file_position == 0:
if (len(lines_to_publish) > 0 and
lines_to_publish[0].startswith("Ray worker pid: ")):
file_info.worker_pid = int(
lines_to_publish[0].split(" ")[-1])
lines_to_publish = lines_to_publish[1:]
file_info.file_position = file_info.file_handle.tell()
if len(lines_to_publish) > 0 and is_worker:
self.redis_client.publish(
ray.gcs_utils.LOG_FILE_CHANNEL,
json.dumps({
"ip": self.ip,
"pid": file_info.worker_pid,
"lines": lines_to_publish
}))
anything_published = True
return anything_published | Get any changes to the log files and push updates to Redis.
Returns:
True if anything was published and false otherwise. |
13,032 | def segment_to_line(document, coords):
"polyline with 2 vertices using <line> tag"
return setattribs(
document.createElement('line'),
x1 = coords[0],
y1 = coords[1],
x2 = coords[2],
y2 = coords[3],
) | polyline with 2 vertices using <line> tag |
13,033 | def restore_type(self, type):
mapping = {
: ,
: ,
: ,
: ,
: ,
: ,
: ,
}
if type not in mapping:
message = % type
raise tableschema.exceptions.StorageError(message)
return mapping[type] | Restore type from BigQuery |
13,034 | def get_common_prefix(z):
name_list = z.namelist()
if name_list and all(n.startswith(name_list[0]) for n in name_list[1:]):
return name_list[0]
return None | Get common directory in a zip file if any. |
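A quick, self-contained check of `get_common_prefix()` from the row above; note the common prefix is only reported when the archive's first entry is the shared directory itself.

```python
# Illustrative usage of get_common_prefix() from the row above.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("pkg/", "")          # directory entry comes first
    z.writestr("pkg/a.txt", "a")
    z.writestr("pkg/b.txt", "b")

with zipfile.ZipFile(buf) as z:
    print(get_common_prefix(z))     # -> "pkg/"
```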
13,035 | def get_codes():
cache_filename = os.path.join(os.path.dirname(__file__), , )
data = []
for line in open(cache_filename, 'r'):
if not line.startswith('#'):  # comment prefix assumed
data.append(line.split())
return data | >> get_codes()
ISO ISO3 ISO-Numeric fips Country Capital Area(in sq km) Population Continent tld CurrencyCode CurrencyName Phone Postal Code Format Postal Code Regex Languages geonameid neighbours EquivalentFipsCode |
13,036 | def _parse_request_arguments(self, request):
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError( +
)
return inference_addresses, model_names, model_versions, model_signatures | Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters |
13,037 | def general_attention(key, context, hidden_size, projected_align=False):
if hidden_size % 2 != 0:
raise ValueError("hidden size must be dividable by two")
batch_size = tf.shape(context)[0]
max_num_tokens, token_size = context.get_shape().as_list()[-2:]
r_context = tf.reshape(context, shape=[-1, max_num_tokens, token_size])
projected_key = \
tf.layers.dense(key, hidden_size, kernel_initializer=xav())
r_projected_key = tf.reshape(projected_key, shape=[-1, hidden_size, 1])
lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//2)
lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//2)
(output_fw, output_bw), states = \
tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
cell_bw=lstm_bw_cell,
inputs=r_context,
dtype=tf.float32)
bilstm_output = tf.concat([output_fw, output_bw], -1)
attn = tf.nn.softmax(tf.matmul(bilstm_output, r_projected_key), dim=1)
if projected_align:
log.info("Using projected attention alignment")
t_context = tf.transpose(bilstm_output, [0, 2, 1])
output = tf.reshape(tf.matmul(t_context, attn),
shape=[batch_size, -1, hidden_size])
else:
log.info("Using without projected attention alignment")
t_context = tf.transpose(r_context, [0, 2, 1])
output = tf.reshape(tf.matmul(t_context, attn),
shape=[batch_size, -1, token_size])
return output | An implementation of the Luong et al. attention mechanism with general score. Based on the paper:
https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
Args:
key: A tensorflow tensor with dimensionality [None, None, key_size]
context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]
hidden_size: Number of units in hidden representation
projected_align: Using bidirectional lstm for hidden representation of context.
If true, a bidirectional LSTM layer with dimensionality [hidden_size] is inserted between the input and the attention mechanism.
If false, bidirectional lstm is not used.
Returns:
output: Tensor at the output with dimensionality [None, None, hidden_size] |
13,038 | def rollforward(self, date):
if self.onOffset(date):
return date
else:
return date + YearEnd(month=self.month) | Roll date forward to nearest end of year |
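The rollforward row above mirrors the behaviour of pandas' own YearEnd offset; a small sketch of the same idea using the public pandas API (assuming pandas is available):

```python
# Sketch of year-end rollforward behaviour using pandas' public offsets.
import pandas as pd
from pandas.tseries.offsets import YearEnd

offset = YearEnd()
print(offset.rollforward(pd.Timestamp("2021-06-15")))  # 2021-12-31: rolled forward
print(offset.rollforward(pd.Timestamp("2021-12-31")))  # 2021-12-31: already on offset
```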
13,039 | def get_path_for_core_element(self, core_element_id):
def check_function(row_iter, iter_found):
row_id = self.tree_store.get_value(row_iter, self.ID_STORAGE_ID)
if len(row_id) == len(core_element_id):
if row_id == core_element_id:
iter_found.append(self.tree_store.get_path(row_iter))
found_paths = []
self.iter_tree_with_handed_function(check_function, found_paths)
return found_paths[0] if found_paths else None | Get path to the row representing core element described by handed core_element_id
:param list core_element_id: Core element identifier used in the respective list store column
:rtype: tuple
:return: path |
13,040 | def _clone(self, **kwargs):
params = copy.deepcopy(self._params)
if self._iexact:
params.update(iexact=self._iexact)
params.update(**kwargs)
clone = self.__class__(**params)
return clone | Create a clone of this collection. The only param in the
initial collection is the filter context. Each chainable
filter is added to the clone and returned to preserve
previous iterators and their returned elements.
:return: :class:`.ElementCollection` |
13,041 | def unscale_dict(C):
C_out = {k: _scale_dict[k] * v for k, v in C.items()}
for k in C_symm_keys[8]:
C_out[] = unscale_8(C_out[])
return C_out | Undo the scaling applied in `scale_dict`. |
13,042 | def spectral_contrast(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
win_length=None, window=, center=True, pad_mode=,
freq=None, fmin=200.0, n_bands=6, quantile=0.02,
linear=False):
# spectrogram / frequency setup and input validation reconstructed from the
# recovered error strings and the variables used below
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window,
center=center, pad_mode=pad_mode)
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
freq = np.atleast_1d(freq)
if freq.ndim != 1 or len(freq) != S.shape[0]:
raise ParameterError('freq.shape mismatch: expected ({:d},)'.format(S.shape[0]))
if n_bands < 1 or not isinstance(n_bands, int):
raise ParameterError('n_bands must be a positive integer')
if not 0.0 < quantile < 1.0:
raise ParameterError('quantile must lie in the range (0, 1)')
if fmin <= 0:
raise ParameterError('fmin must be a positive number')
octa = np.zeros(n_bands + 2)
octa[1:] = fmin * (2.0 ** np.arange(0, n_bands + 1))
if np.any(octa[:-1] >= 0.5 * sr):
raise ParameterError('Frequency band exceeds Nyquist. '
'Reduce either fmin or n_bands.')
valley = np.zeros((n_bands + 1, S.shape[1]))
peak = np.zeros_like(valley)
for k, (f_low, f_high) in enumerate(zip(octa[:-1], octa[1:])):
current_band = np.logical_and(freq >= f_low, freq <= f_high)
idx = np.flatnonzero(current_band)
if k > 0:
current_band[idx[0] - 1] = True
if k == n_bands:
current_band[idx[-1] + 1:] = True
sub_band = S[current_band]
if k < n_bands:
sub_band = sub_band[:-1]
idx = np.rint(quantile * np.sum(current_band))
idx = int(np.maximum(idx, 1))
sortedr = np.sort(sub_band, axis=0)
valley[k] = np.mean(sortedr[:idx], axis=0)
peak[k] = np.mean(sortedr[-idx:], axis=0)
if linear:
return peak - valley
else:
return power_to_db(peak) - power_to_db(valley) | Compute spectral contrast [1]_
.. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
and Lian-Hong Cai.
"Music type classification by spectral contrast feature."
In Multimedia and Expo, 2002. ICME'02. Proceedings.
2002 IEEE International Conference on, vol. 1, pp. 113-116.
IEEE, 2002.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies.
fmin : float > 0
Frequency cutoff for the first bin `[0, fmin]`
Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
n_bands : int > 1
number of frequency bands
quantile : float in (0, 1)
quantile for determining peaks and valleys
linear : bool
If `True`, return the linear difference of magnitudes:
`peaks - valleys`.
If `False`, return the logarithmic difference:
`log(peaks) - log(valleys)`.
Returns
-------
contrast : np.ndarray [shape=(n_bands + 1, t)]
each row of spectral contrast values corresponds to a given
octave-based frequency
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(contrast, x_axis='time')
>>> plt.colorbar()
>>> plt.ylabel('Frequency bands')
>>> plt.title('Spectral contrast')
>>> plt.tight_layout() |
13,043 | def identifiers(dataset_uri):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
for i in dataset.identifiers:
click.secho(i) | List the item identifiers in the dataset. |
13,044 | def _dist_to_trans(self, dist):
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz | Convert mouse x, y movement into x, y, z translations |
13,045 | def list_of_lists_to_dict(l):
d = {}
for key, val in l:
d.setdefault(key, []).append(val)
return d | Convert list of key,value lists to dict
[['id', 1], ['id', 2], ['id', 3], ['foo', 4]]
{'id': [1, 2, 3], 'foo': [4]} |
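Runnable version of the docstring example above:

```python
# Runnable version of the docstring example for list_of_lists_to_dict().
pairs = [["id", 1], ["id", 2], ["id", 3], ["foo", 4]]
print(list_of_lists_to_dict(pairs))  # {'id': [1, 2, 3], 'foo': [4]}
```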
13,046 | def resolve_weak_types(storage, debug=False):
for run in storage[]:
prev_strong = prev_type = run[]
start, length = run[], run[]
chars = storage[][start:start+length]
for _ch in chars:
bidi_type = _ch[]
if bidi_type == :
_ch[] = bidi_type = prev_type
if bidi_type == and prev_strong == :
_ch[] =
if bidi_type in (, , ):
prev_strong = bidi_type
prev_type = _ch[]
for _ch in chars:
if _ch[] == :
_ch[] =
for idx in range(1, len(chars) - 1):
bidi_type = chars[idx][]
prev_type = chars[idx-1][]
next_type = chars[idx+1][]
if bidi_type == and (prev_type == next_type == ):
chars[idx][] =
if bidi_type == and prev_type == next_type and \
prev_type in (, ):
chars[idx][] = prev_type
for idx in range(len(chars)):
if chars[idx][] == :
for et_idx in range(idx-1, -1, -1):
if chars[et_idx][] == :
chars[et_idx][] =
else:
break
for et_idx in range(idx+1, len(chars)):
if chars[et_idx][] == :
chars[et_idx][] =
else:
break
for _ch in chars:
if _ch[] in (, , ):
_ch[] =
prev_strong = run[]
for _ch in chars:
if _ch[] == and prev_strong == :
_ch[] =
if _ch[] in (, ):
prev_strong = _ch[]
if debug:
debug_storage(storage, runs=True) | Resolve weak type rules W1 - W3.
See: http://unicode.org/reports/tr9/#Resolving_Weak_Types |
13,047 | def parse_mcast_grps(family, grp_attr):
remaining = c_int()
if not grp_attr:
raise BUG
for nla in nla_for_each_nested(grp_attr, remaining):
tb = dict()
err = nla_parse_nested(tb, CTRL_ATTR_MCAST_GRP_MAX, nla, family_grp_policy)
if err < 0:
return err
if not tb[CTRL_ATTR_MCAST_GRP_ID] or not tb[CTRL_ATTR_MCAST_GRP_NAME]:
return -NLE_MISSING_ATTR
id_ = nla_get_u32(tb[CTRL_ATTR_MCAST_GRP_ID])
name = nla_get_string(tb[CTRL_ATTR_MCAST_GRP_NAME])
err = genl_family_add_grp(family, id_, name)
if err < 0:
return err
return 0 | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L64.
Positional arguments:
family -- genl_family class instance.
grp_attr -- nlattr class instance.
Returns:
0 on success or a negative error code. |
13,048 | def play_NoteContainer(self, nc, channel=1, velocity=100):
self.notify_listeners(self.MSG_PLAY_NC, {: nc,
: channel, : velocity})
if nc is None:
return True
for note in nc:
if not self.play_Note(note, channel, velocity):
return False
return True | Play the Notes in the NoteContainer nc. |
13,049 | def _publisher_callback(self, publish_ack):
logging.debug("ack received: " + str(publish_ack).replace(, ))
self._rx_queue.append(publish_ack) | Publisher callback that gRPC and WebSocket transports can pass messages to;
adds the received message onto the queue.
:param publish_ack: EventHub_pb2.Ack the ack received from either wss or grpc
:return: None |
13,050 | def ANNASSIGN(self, node):
if node.value:
self.handleNode(node.target, node)
if not isinstance(self.scope, FunctionScope):
self.deferHandleNode(node.annotation, node)
if node.value:
if isinstance(self.scope, ModuleScope):
self.deferHandleNode(node.value, node)
else:
self.handleNode(node.value, node) | Annotated assignments don't have annotations evaluated on function
scope, hence the custom implementation. Compared to the pyflakes
version, we defer evaluation of the annotations (and values on
module level). |
13,051 | def safe_file(path, suffix=None, cleanup=True):
safe_path = '{0}.{1}'.format(path, suffix or uuid.uuid4())
if os.path.exists(path):
shutil.copy(path, safe_path)
try:
yield safe_path
if cleanup:
shutil.move(safe_path, path)
else:
shutil.copy(safe_path, path)
finally:
if cleanup:
safe_delete(safe_path) | A with-context that copies a file, and copies the copy back to the original file on success.
This is useful for doing work on a file but only changing its state on success.
:param str suffix: Use this suffix to create the copy. Otherwise use a random string.
:param bool cleanup: Whether or not to clean up the copy. |
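A usage sketch for `safe_file()` from the row above; it assumes the generator is wrapped with `contextlib.contextmanager` in its module, since the body uses `yield` and the docstring calls it a with-context, and the file name is hypothetical.

```python
# Usage sketch for safe_file(). Assumes the generator above is decorated
# with @contextlib.contextmanager, as the docstring implies.
with safe_file("settings.conf") as tmp_path:   # "settings.conf" is a hypothetical file
    with open(tmp_path, "a") as fh:
        fh.write("new_option = 1\n")
# on success the edited copy replaces settings.conf; on an exception the
# original file is left untouched
```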
13,052 | def _suppressed(self, filename, line, code):
if code in self.suppress_codes:
return True
lines = self._file_lines(filename)
if not lines:
return False
while line > len(lines):
line = line - 1
relevant_line = lines[line - 1]
try:
suppressions_function = relevant_line.split("
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
except IndexError:
above_line = lines[max(0, line - 2)]
suppressions_function = above_line.strip()[1:].strip()
if suppressions_function.startswith("suppress("):
return code in _parse_suppressions(suppressions_function)
finally:
pass | Return true if linter error code is suppressed inline.
The suppression format is suppress(CODE1,CODE2,CODE3) etc. |
13,053 | def exec_resize(self, exec_id, height=None, width=None):
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res) | Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session |
13,054 | def has_parent_bins(self, bin_id):
if self._catalog_session is not None:
return self._catalog_session.has_parent_catalogs(catalog_id=bin_id)
return self._hierarchy_session.has_parents(id_=bin_id) | Tests if the ``Bin`` has any parents.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the bin has parents, ``false``
otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
13,055 | def get_remote_client(self, target_name, user=None, password=None):
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True) | Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client |
13,056 | def prefetch_relations(weak_queryset):
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
gfks = {}
for name, gfk in weak_queryset.model.__dict__.items():
if not isinstance(gfk, GenericForeignKey):
continue
gfks[name] = gfk
data = {}
for weak_model in weak_queryset:
for gfk_name, gfk_field in gfks.items():
related_content_type_id = getattr(
weak_model,
gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_content_type not in data.keys():
data[related_content_type] = []
data[related_content_type].append(related_object_id)
for content_type, object_ids in data.items():
model_class = content_type.model_class()
models = prefetch_relations(model_class.objects.filter(pk__in=object_ids).select_related())
for model in models:
for weak_model in weak_queryset:
for gfk_name, gfk_field in gfks.items():
related_content_type_id = getattr(
weak_model,
gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_object_id != model.pk:
continue
if related_content_type != content_type:
continue
setattr(weak_model, gfk_name, model)
return weak_queryset | FROM: https://djangosnippets.org/snippets/2492/
Consider such a model class::
class Action(models.Model):
actor_content_type = models.ForeignKey(ContentType,related_name='actor')
actor_object_id = models.PositiveIntegerField()
actor = GenericForeignKey('actor_content_type','actor_object_id')
And dataset::
Action(actor=user1).save()
Action(actor=user2).save()
This will hit the user table once for each action::
[a.actor for a in Action.objects.all()]
Whereas this will hit the user table once::
[a.actor for a in prefetch_relations(Action.objects.all())]
Actually, the example above will hit the database N+1 times, where N is
the number of actions. But with prefetch_relations(), the database will be
hit N+1 times where N is the number of distinct content types.
Note that prefetch_relations() is recursive.
Here an example, making a list with prefetch_relations(), and then without
prefetch_relations(). See the number of database hits after each test.
In [1]: from django import db; from prefetch_relations import prefetch_relations
In [2]: db.reset_queries()
In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
In [4]: print len(db.connection.queries)
34
In [5]: db.reset_queries()
In [6]: print len(db.connection.queries)
0
In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
In [8]: print len(db.connection.queries)
396 |
13,057 | def create_database_session(engine):
try:
Session = sessionmaker(bind=engine)
return Session()
except OperationalError as e:
raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0]) | Connect to the database |
13,058 | def _update_property_from_dict(self, section, option, new_properties):
try:
property_name = "{0}_{1}".format(section, option)
self.set(section, option, new_properties[property_name])
except KeyError:
pass | Update a config property value with a new property value
Property name must be equal to 'Section_option' of config property
:param section: config section
:param option: config option
:param new_properties: dict with new properties values |
13,059 | def handle(self, *args, **kwargs):
listener = ExecutorListener(redis_params=getattr(settings, , {}).get(, {}))
def _killer(signum, frame):
listener.terminate()
signal(SIGINT, _killer)
signal(SIGTERM, _killer)
async def _runner():
if kwargs[]:
await listener.clear_queue()
async with listener:
pass
loop = asyncio.new_event_loop()
loop.run_until_complete(_runner())
loop.close() | Run the executor listener. This method never returns. |
13,060 | def _check_cats(cats, vtypes, df, prep, callers):
out = []
for cat in cats:
all_vals = []
for vtype in vtypes:
vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
all_vals.extend(vals)
if sum(all_vals) / float(len(all_vals)) > 2:
out.append(cat)
if len(out) == 0:
return cats
else:
return out | Only include categories in the final output if they have values. |
13,061 | def next_population(self, population, fitnesses):
return common.make_population(self._population_size,
self._generate_solution) | Make a new population after each optimization iteration.
Args:
population: The current population of solutions.
fitnesses: The fitness associated with each solution in the population
Returns:
list; a list of solutions. |
13,062 | def get_path(brain_or_object):
if is_brain(brain_or_object):
return brain_or_object.getPath()
return "/".join(get_object(brain_or_object).getPhysicalPath()) | Calculate the physical path of this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Physical path of the object
:rtype: string |
13,063 | def plugins_post_process(self, retcode):
logger.info("Post-processing test...")
self.publish("core", "stage", "post_process")
for plugin in self.plugins.values():
logger.debug("Post-process %s", plugin)
try:
logger.debug("RC before: %s", retcode)
retcode = plugin.post_process(retcode)
logger.debug("RC after: %s", retcode)
except Exception:
logger.error("Failed post-processing plugin %s", plugin, exc_info=True)
if not retcode:
retcode = 1
return retcode | Call post_process() on all plugins |
13,064 | def _add_snps(
self,
snps,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
):
discrepant_positions = pd.DataFrame()
discrepant_genotypes = pd.DataFrame()
if snps.snps is None:
return discrepant_positions, discrepant_genotypes
build = snps.build
source = [s.strip() for s in snps.source.split(",")]
if not snps.build_detected:
print("build not detected, assuming build {}".format(snps.build))
if self._build is None:
self._build = build
elif self._build != build:
print(
"build / assembly mismatch between current build of SNPs and SNPs being loaded"
)
snps = self._double_single_alleles(snps.snps, "X")
if self._snps is None:
self._source.extend(source)
self._snps = snps
else:
common_snps = self._snps.join(snps, how="inner", rsuffix="_added")
discrepant_positions = common_snps.loc[
(common_snps["chrom"] != common_snps["chrom_added"])
| (common_snps["pos"] != common_snps["pos_added"])
]
if 0 < len(discrepant_positions) < discrepant_snp_positions_threshold:
print(
str(len(discrepant_positions)) + " SNP positions were discrepant; "
"keeping original positions"
)
if save_output:
self._discrepant_positions_file_count += 1
lineage.save_df_as_csv(
discrepant_positions,
self._output_dir,
self.get_var_name()
+ "_discrepant_positions_"
+ str(self._discrepant_positions_file_count)
+ ".csv",
)
elif len(discrepant_positions) >= discrepant_snp_positions_threshold:
print(
"too many SNPs differ in position; ensure same genome build is being used"
)
return discrepant_positions, discrepant_genotypes
common_snps = common_snps.loc[
~common_snps["genotype"].isnull()
& ~common_snps["genotype_added"].isnull()
]
discrepant_genotypes = common_snps.loc[
(
(common_snps["genotype"].str.len() == 1)
& (common_snps["genotype_added"].str.len() == 1)
& ~(
common_snps["genotype"].str[0]
== common_snps["genotype_added"].str[0]
)
)
| (
(common_snps["genotype"].str.len() == 2)
& (common_snps["genotype_added"].str.len() == 2)
& ~(
(
common_snps["genotype"].str[0]
== common_snps["genotype_added"].str[0]
)
& (
common_snps["genotype"].str[1]
== common_snps["genotype_added"].str[1]
)
)
& ~(
(
common_snps["genotype"].str[0]
== common_snps["genotype_added"].str[1]
)
& (
common_snps["genotype"].str[1]
== common_snps["genotype_added"].str[0]
)
)
)
]
if 0 < len(discrepant_genotypes) < discrepant_genotypes_threshold:
print(
str(len(discrepant_genotypes)) + " SNP genotypes were discrepant; "
"marking those as null"
)
if save_output:
self._discrepant_genotypes_file_count += 1
lineage.save_df_as_csv(
discrepant_genotypes,
self._output_dir,
self.get_var_name()
+ "_discrepant_genotypes_"
+ str(self._discrepant_genotypes_file_count)
+ ".csv",
)
elif len(discrepant_genotypes) >= discrepant_genotypes_threshold:
print(
"too many SNPs differ in their genotype; ensure file is for same "
"individual"
)
return discrepant_positions, discrepant_genotypes
self._source.extend(source)
self._snps = self._snps.combine_first(snps)
self._snps.loc[discrepant_genotypes.index, "genotype"] = np.nan
self._snps["pos"] = self._snps["pos"].astype(np.int64)
self._snps = sort_snps(self._snps)
return discrepant_positions, discrepant_genotypes | Add SNPs to this Individual.
Parameters
----------
snps : SNPs
SNPs to add
discrepant_snp_positions_threshold : int
see above
discrepant_genotypes_threshold : int
see above
save_output
see above
Returns
-------
discrepant_positions : pandas.DataFrame
discrepant_genotypes : pandas.DataFrame |
13,065 | def t_heredoc(self, t):
r
t.lexer.is_tabbed = False
self._init_heredoc(t)
t.lexer.begin() | r'<<\S+\r?\n |
13,066 | def untlpy2etd_ms(untl_elements, **kwargs):
degree_children = {}
date_exists = False
seen_creation = False
etd_ms_root = ETD_MS_CONVERSION_DISPATCH[]()
for element in untl_elements.children:
etd_ms_element = None
if element.tag in ETD_MS_CONVERSION_DISPATCH:
if not ark:
ark = False
if element.qualifier == :
ark = element.content
if ark is not None:
ark_identifier = ETD_MS_CONVERSION_DISPATCH[](
ark=ark,
)
etd_ms_root.add_child(ark_identifier)
if degree_children:
degree_element = ETD_MS_CONVERSION_DISPATCH[]()
degree_child_element = None
for k, v in degree_children.iteritems():
degree_child_element = ETD_MS_DEGREE_DISPATCH[k](
content=v,
)
if degree_child_element:
degree_element.add_child(degree_child_element)
etd_ms_root.add_child(degree_element)
return etd_ms_root | Convert the UNTL elements structure into an ETD_MS structure.
kwargs can be passed to the function for certain effects. |
13,067 | def set_restricted(self, obj, restricted):
if restricted:
self._restricted.add(obj)
elif obj in self._restricted:
self._restricted.remove(obj) | Set the restriction on the given object.
You can use this to signal that a certain function is restricted.
Then you can query the restriction later with :meth:`Reftrack.is_restricted`.
:param obj: a hashable object
:param restricted: True, if you want to restrict the object.
:type restricted: :class:`bool`
:returns: None
:rtype: None
:raises: None |
13,068 | def update_col(self, column_name, series):
logger.debug(.format(
column_name, self.name))
self.local[column_name] = series | Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data. |
13,069 | def setOverlayTransformOverlayRelative(self, ulOverlayHandle, ulOverlayHandleParent):
fn = self.function_table.setOverlayTransformOverlayRelative
pmatParentOverlayToOverlayTransform = HmdMatrix34_t()
result = fn(ulOverlayHandle, ulOverlayHandleParent, byref(pmatParentOverlayToOverlayTransform))
return result, pmatParentOverlayToOverlayTransform | Sets the transform to be relative to the transform of the specified overlay. This overlay's visibility will also track the parent's visibility. |
13,070 | async def teardown_conn(self, context):
client_id = context.user_data
self._logger.info("Tearing down client connection: %s", client_id)
if client_id not in self.clients:
self._logger.warning("client_id %s did not exist in teardown_conn", client_id)
else:
del self.clients[client_id] | Teardown a connection from a client. |
13,071 | def train_model(best_processed_path, weight_path=, verbose=2):
x_train_char, x_train_type, y_train = prepare_feature(best_processed_path, option='train')
x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='test')
validation_set = False
if os.path.isdir(os.path.join(best_processed_path, 'val')):
validation_set = True
x_val_char, x_val_type, y_val = prepare_feature(best_processed_path, option='val')
if not os.path.isdir(os.path.dirname(weight_path)):
os.makedirs(os.path.dirname(weight_path))
callbacks_list = [
ReduceLROnPlateau(),
ModelCheckpoint(
weight_path,
save_best_only=True,
save_weights_only=True,
monitor=,
mode=,
verbose=1
)
]
model = get_convo_nn2()
train_params = [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]
for (epochs, batch_size) in train_params:
print("train with {} epochs and {} batch size".format(epochs, batch_size))
if validation_set:
model.fit([x_train_char, x_train_type], y_train,
epochs=epochs, batch_size=batch_size,
verbose=verbose,
callbacks=callbacks_list,
validation_data=([x_val_char, x_val_type], y_val))
else:
model.fit([x_train_char, x_train_type], y_train,
epochs=epochs, batch_size=batch_size,
verbose=verbose,
callbacks=callbacks_list)
return model | Given path to processed BEST dataset,
train CNN model for words beginning alongside with
character label encoder and character type label encoder
Input
=====
best_processed_path: str, path to processed BEST dataset
weight_path: str, path to weight path file
verbose: int, verbose option for training Keras model
Output
======
model: keras model, keras model for tokenize prediction |
13,072 | def get_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> RequestParameters:
if not self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
]:
if self.query_string:
self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = RequestParameters(
parse_qs(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
)
return self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] | Method to parse `query_string` using `urllib.parse.parse_qs`.
This method is used by the `args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: RequestParameters |
13,073 | def wrap(self, row: Union[Mapping[str, Any], Sequence[Any]]):
return (
self.dataclass(
**{
ident: row[column_name]
for ident, column_name in self.ids_and_column_names.items()
}
)
if isinstance(row, Mapping)
else self.dataclass(
**{ident: val for ident, val in zip(self.ids_and_column_names.keys(), row)}
)
) | Return row tuple for row. |
13,074 | def do_bc(self, arg):
token_list = self.split_tokens(arg, 1, 2)
pid, tid, address, size = self.input_breakpoint(token_list)
debug = self.debug
found = False
if size is None:
if tid is not None:
if debug.has_hardware_breakpoint(tid, address):
debug.dont_watch_variable(tid, address)
found = True
if pid is not None:
if debug.has_code_breakpoint(pid, address):
debug.dont_break_at(pid, address)
found = True
else:
if debug.has_page_breakpoint(pid, address):
debug.dont_watch_buffer(pid, address, size)
found = True
if not found:
print("Error: breakpoint not found.") | [~process] bc <address> - clear a code breakpoint
[~thread] bc <address> - clear a hardware breakpoint
[~process] bc <address-address> - clear a memory breakpoint
[~process] bc <address> <size> - clear a memory breakpoint |
13,075 | def set_authoring_nodes(self, editor):
project_node = self.default_project_node
file_node = self.register_file(editor.file, project_node)
editor_node = self.register_editor(editor, file_node)
return True | Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool |
13,076 | def _boundary(self):
boundary = None
try:
import uuid
boundary = uuid.uuid4().hex
except ImportError:
import random, sha
bits = random.getrandbits(160)
boundary = sha.new(str(bits)).hexdigest()
return boundary | Returns a random string to use as the boundary for a message.
Returns:
string. Boundary |
13,077 | def payload(self):
payload = {'type': self.resource_type(), 'attributes': self.attributes}  # key names assumed from JSON:API convention
if self.id:
payload['id'] = self.id
return payload | Renders the resource payload.
:returns: a dict representing the object to be used as payload for a request |
13,078 | def _print_napps(cls, napp_list):
mgr = NAppsManager()
enabled = mgr.get_enabled()
installed = mgr.get_installed()
napps = []
for napp, desc in sorted(napp_list):
status = if napp in installed else
status += if napp in enabled else
status = .format(status)
name = .format(*napp)
napps.append((status, name, desc))
cls.print_napps(napps) | Format the NApp list to be printed. |
13,079 | def get_2d_local_memory_v2(x, query_shape, memory_flange):
(_, height, width, depth_x) = common_layers.shape_list(x)
paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
[memory_flange[1], memory_flange[1]], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_x.set_shape([None, height+2*memory_flange[0],
width+2*memory_flange[1], depth_x])
num_h_memory_blocks = height//query_shape[0] + 1
num_w_memory_blocks = width//query_shape[1] + 1
x_memory_blocks = _extract_blocks(padded_x,
query_shape[0], query_shape[1])
x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks,
2)
x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2)
x_right_width = tf.concat(x_width_blocks[1:], axis=2)
x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)
x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)
x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1)
x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)
x = tf.concat([x_top_height, x_bottom_height], axis=3)
return x | Gather memory blocks around query blocks. The flange is half of the query size.
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth tensor]
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
tensor. |
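A small shape check with assumed sizes, to make the blocking arithmetic above concrete; the numbers are illustrative only.
height = width = 32
query_shape = (8, 8)
memory_flange = (4, 4)   # half of the query size, as the docstring requires
num_h_blocks = height // query_shape[0]        # 4 query blocks per column
num_w_blocks = width // query_shape[1]         # 4 query blocks per row
mem_h = query_shape[0] + 2 * memory_flange[0]  # 16
mem_w = query_shape[1] + 2 * memory_flange[1]  # 16
print(num_h_blocks, num_w_blocks, mem_h, mem_w)  # 4 4 16 16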
13,080 | def close(self):
if not self.closed:
self._ipython.events.unregister('post_run_cell', self._fill)  # event name elided in source; 'post_run_cell' assumed
self._box.close()
self.closed = True | Close and remove hooks. |
13,081 | def visit(self, node):
method = 'visit_' + node.__class__.__name__  # string literals reconstructed; originals elided
if not hasattr(self, method):
raise ValueError('Unknown node type: %s' % node.__class__.__name__)
visitor = getattr(self, method)
if anno.hasanno(node, 'active_in'):
self.active_variables = anno.getanno(node, 'active_in')
pri, adj = visitor(node)
if isinstance(pri, gast.AST):
anno.setdefaultanno(pri, 'adj', adj)
else:
for node in pri:
anno.setdefaultanno(node, 'adj', adj)
if isinstance(adj, gast.AST):
anno.setdefaultanno(adj, 'pri', pri)
else:
for node in adj:
anno.setdefaultanno(node, 'pri', pri)
return pri, adj | Visit a node.
This method is largely modelled after the ast.NodeTransformer class.
Args:
node: The node to visit.
Returns:
A tuple of the primal and adjoint, each of which is a node or a list of
nodes. |
13,082 | def get(self, session):
request = TOPRequest()
self.create(self.execute(request, session))
return self.after_sales | taobao.aftersale.get - Query the user's after-sales service templates.
Queries the after-sales service templates configured by the user; only the template title and id are returned. |
13,083 | def subtract(self, years=0, months=0, weeks=0, days=0):
return self.add(years=-years, months=-months, weeks=-weeks, days=-days) | Remove duration from the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date |
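A brief usage sketch; it assumes the pendulum-style Date API that the signature and :rtype: suggest.
import pendulum  # assumed dependency

d = pendulum.date(2020, 3, 15)
print(d.subtract(years=1, months=2, days=3))  # 2019-01-12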
13,084 | def rewrite_update(clauseelement, multiparams, params):
newmultiparams = []
_multiparams = multiparams[0]
if len(_multiparams) == 0:
return clauseelement, multiparams, params
for _params in _multiparams:
newparams = {}
for key, val in _params.items():
if (
not isinstance(val, MutableDict) or
(not any(val._changed_keys) and not any(val._deleted_keys))
):
newparams[key] = val
continue
for subkey, subval in val.items():
if subkey in val._changed_keys:
newparams["{0}[]".format(key, subkey)] = subval
for subkey in val._deleted_keys:
newparams["{0}[]".format(key, subkey)] = None
newmultiparams.append(newparams)
_multiparams = (newmultiparams, )
clause = clauseelement.values(newmultiparams[0])
clause._crate_specific = True
return clause, _multiparams, params | change the params to enable partial updates
sqlalchemy by default only supports updates of complex types in the form of
"col = ?", ({"x": 1, "y": 2}
but crate supports
"col['x'] = ?, col['y'] = ?", (1, 2)
by using the `Craty` (`MutableDict`) type.
The update statement is only rewritten if an item of the MutableDict was
changed. |
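A standalone illustration of the key rewriting itself, with plain sets standing in for MutableDict's change tracking; no SQLAlchemy objects are involved.
params = {'col': {'x': 1, 'y': 2, 'z': 3}}
changed = {'x'}   # stands in for val._changed_keys
deleted = {'z'}   # stands in for val._deleted_keys

newparams = {}
for key, val in params.items():
    for subkey, subval in val.items():
        if subkey in changed:
            newparams["{0}['{1}']".format(key, subkey)] = subval
    for subkey in deleted:
        newparams["{0}['{1}']".format(key, subkey)] = None

print(newparams)  # {"col['x']": 1, "col['z']": None}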
13,085 | def copy(self):
features_copy = [feature.copy() for feature in self.features]
copy = type(self)(self.top.seq, circular=self.circular,
features=features_copy, name=self.name,
bottom=self.bottom.seq, run_checks=False)
return copy | Create a copy of the current instance.
:returns: A safely-editable copy of the current sequence.
:rtype: coral.DNA |
13,086 | def sort_item(iterable, number, reverse=False):
return sorted(iterable, key=itemgetter(number), reverse=reverse) | Sort the iterable by the item at the given index number. |
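A quick usage sketch; it assumes itemgetter has been imported from operator, which the function requires.
from operator import itemgetter  # required by sort_item

pairs = [('b', 2), ('a', 3), ('c', 1)]
print(sort_item(pairs, 1))                # [('c', 1), ('b', 2), ('a', 3)]
print(sort_item(pairs, 0, reverse=True))  # [('c', 1), ('b', 2), ('a', 3)]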
13,087 | def b58decode(val, charset=DEFAULT_CHARSET):
def _b58decode_int(val):
output = 0
for char in val:
output = output * base + charset.index(char)
return output
if isinstance(val, str):
val = val.encode()
if isinstance(charset, str):
charset = charset.encode()
base = len(charset)
if not base == 58:
raise ValueError('charset base must be 58, not %s' % base)
pad_len = len(val)
val = val.lstrip(bytes([charset[0]]))
pad_len -= len(val)
acc = _b58decode_int(val)
result = deque()
while acc > 0:
acc, mod = divmod(acc, 256)
result.appendleft(mod)
prefix = b'\0' * pad_len
return prefix + bytes(result) | Decode base58check encoded input to original raw bytes.
:param bytes val: The value to base58check decode.
:param bytes charset: (optional) The character set to use for decoding.
:return: the decoded bytes.
:rtype: bytes
Usage::
>>> import base58check
>>> base58check.b58decode('1BoatSLRHtKNngkdXEeobR76b53LETtpyT')
b'\x00v\x80\xad\xec\x8e\xab\xca\xba\xc6v\xbe\x9e\x83\x85J\xde\x0b\xd2,\xdb\x0b\xb9`\xde' |
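A minimal driver for the function above. DEFAULT_CHARSET is not part of the snippet; the standard Bitcoin base58 alphabet is assumed, and deque must come from collections.
from collections import deque  # used inside b58decode

DEFAULT_CHARSET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

print(b58decode('2'))   # b'\x01'      ('2' is index 1 in the alphabet)
print(b58decode('5Q'))  # b'\xff'      (4 * 58 + 23 = 255)
print(b58decode('12'))  # b'\x00\x01'  (a leading '1' becomes a leading zero byte)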
13,088 | def Validate(self, value, **_):
if value is None:
return
return rdfvalue.RDFBool(super(ProtoBoolean, self).Validate(value)) | Check that value is a valid enum. |
13,089 | def autobuild_docproject():
try:
family = utilities.get_family()
autobuild_release(family)
autobuild_documentation(family.tile)
except unit_test.IOTileException as e:
print(e.format())
Exit(1) | Autobuild a project that only contains documentation |
13,090 | def merge_rest_api_config(configs):
bind = None
connect = None
timeout = None
opentsdb_url = None
opentsdb_db = None
opentsdb_username = None
opentsdb_password = None
client_max_size = None
for config in reversed(configs):
if config.bind is not None:
bind = config.bind
if config.connect is not None:
connect = config.connect
if config.timeout is not None:
timeout = config.timeout
if config.opentsdb_url is not None:
opentsdb_url = config.opentsdb_url
if config.opentsdb_db is not None:
opentsdb_db = config.opentsdb_db
if config.opentsdb_username is not None:
opentsdb_username = config.opentsdb_username
if config.opentsdb_password is not None:
opentsdb_password = config.opentsdb_password
if config.client_max_size is not None:
client_max_size = config.client_max_size
return RestApiConfig(
bind=bind,
connect=connect,
timeout=timeout,
opentsdb_url=opentsdb_url,
opentsdb_db=opentsdb_db,
opentsdb_username=opentsdb_username,
opentsdb_password=opentsdb_password,
client_max_size=client_max_size) | Given a list of PathConfig objects, merges them into a single PathConfig,
giving priority in the order of the configs (first has highest priority). |
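A small sketch of the first-wins behaviour, using a namedtuple stand-in for RestApiConfig since the real class is not shown here; the addresses are made up.
from collections import namedtuple

RestApiConfig = namedtuple('RestApiConfig', [
    'bind', 'connect', 'timeout', 'opentsdb_url', 'opentsdb_db',
    'opentsdb_username', 'opentsdb_password', 'client_max_size'])

cli = RestApiConfig('127.0.0.1:8008', None, None, None, None, None, None, None)
toml = RestApiConfig('0.0.0.0:8008', 'tcp://localhost:4004', 300,
                     None, None, None, None, None)

merged = merge_rest_api_config([cli, toml])
print(merged.bind)     # 127.0.0.1:8008 (earlier config wins)
print(merged.connect)  # tcp://localhost:4004 (filled in from the later config)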
13,091 | def sanitize(self):
super(MapRegisterMessage, self).sanitize()
if not isinstance(self.proxy_map_reply, bool):
raise ValueError('Proxy Map-Reply flag must be a boolean')  # message texts reconstructed; originals elided
if not isinstance(self.xtr_id, numbers.Integral) \
or self.xtr_id < 0 or self.xtr_id >= 2 ** 128:
raise ValueError('xTR-ID must be a 128-bit unsigned integer')
if not isinstance(self.site_id, numbers.Integral) \
or self.site_id < 0 or self.site_id >= 2 ** 64:
raise ValueError('Site-ID must be a 64-bit unsigned integer')
if not isinstance(self.for_rtr, bool):
raise ValueError('For-RTR flag must be a boolean')
if not isinstance(self.want_map_notify, bool):
raise ValueError('Want-Map-Notify flag must be a boolean')
if self.nonce != '':  # nonce literal elided in the source; check is a no-op
pass
if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96,
KEY_ID_HMAC_SHA_256_128):
raise ValueError('Invalid key ID')
if not isinstance(self.authentication_data, bytes):
raise ValueError('Authentication data must be bytes')
for record in self.records:
if not isinstance(record, MapRegisterRecord):
raise ValueError('Records must be MapRegisterRecord instances')
record.sanitize() | Check if the current settings conform to the LISP specifications and
fix them where possible. |
13,092 | def addAEMOD(rh):
rh.printSysLog("Enter changeVM.addAEMOD")
invokeScript = "invokeScript.sh"
trunkFile = "aemod.doscript"
fileClass = "X"
tempDir = tempfile.mkdtemp()
if os.path.isfile(rh.parms['aeScript']):
if rh.parms['aeScript'].startswith("/"):
s = rh.parms['aeScript']
tmpAEScript = s[s.rindex("/") + 1:]
else:
tmpAEScript = rh.parms['aeScript']
shutil.copyfile(rh.parms['aeScript'], tempDir + "/" + tmpAEScript)
conf = "#!/bin/bash \n"
baseName = os.path.basename(rh.parms['aeScript'])
parm = "/bin/bash %s %s \n" % (baseName, rh.parms['invparms'])
fh = open(tempDir + "/" + invokeScript, "w")
fh.write(conf)
fh.write(parm)
fh.close()
tar = tarfile.open(tempDir + "/" + trunkFile, "w")
for file in os.listdir(tempDir):
tar.add(tempDir + "/" + file, arcname=file)
tar.close()
punch2reader(rh, rh.userid, tempDir + "/" + trunkFile, fileClass)
shutil.rmtree(tempDir)
else:
shutil.rmtree(tempDir)
msg = msgs.msg['0400'][1] % (modId, rh.parms['aeScript'])  # message key assumed; original elided
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0400'][0])
rh.printSysLog("Exit changeVM.addAEMOD, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Send an Activation Modification Script to the virtual machine.
Input:
Request Handle with the following properties:
function - 'CHANGEVM'
subfunction - 'AEMOD'
userid - userid of the virtual machine
parms['aeScript'] - File specification of the AE script
parms['invparms'] - invparms operand
Output:
Request Handle updated with the results.
Return code - 0: ok
Return code - 4: input error, rs - 11 AE script not found |
13,093 | def set_axis_labels(self, x_var=None, y_var=None):
if x_var is not None:
if x_var in self.data.coords:
self._x_var = x_var
self.set_xlabels(label_from_attrs(self.data[x_var]))
else:
self.set_xlabels(x_var)
if y_var is not None:
if y_var in self.data.coords:
self._y_var = y_var
self.set_ylabels(label_from_attrs(self.data[y_var]))
else:
self.set_ylabels(y_var)
return self | Set axis labels on the left column and bottom row of the grid. |
13,094 | def tabular(client, records):
from renku.models._tabulate import tabulate
echo_via_pager(
tabulate(
records,
headers=OrderedDict((
('added', None),  # column keys elided in the source; names reconstructed
('creators_csv', 'creators'),
('dataset', None),
('full_path', 'path'),
)),
)
) | Format dataset files with a tabular output.
:param client: LocalClient instance.
:param records: Filtered collection. |
13,095 | def CreateReply(self, **attributes):
return Packet(id=self.id, secret=self.secret,
authenticator=self.authenticator, dict=self.dict,
**attributes) | Create a new packet as a reply to this one. This method
makes sure the authenticator and secret are copied over
to the new instance. |
13,096 | def clone(self, run=True):
params = {}
if run:
params.update({'action': 'run'})  # literal keys/values reconstructed; originals elided
extra = {
'resource': self.__class__.__name__,
'query': {'id': self.id, 'run': run}
}
logger.info('Cloning task', extra=extra)
task_data = self._api.post(
url=self._URL['clone'].format(id=self.id), params=params).json()
return Task(api=self._api, **task_data) | Clone task
:param run: run task after cloning
:return: Task object. |
13,097 | def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Output directory must be specified')  # message reconstructed; original elided
if not pdb_file_type:
pdb_file_type = self.pdb_file_type
if self.num_structures_experimental == 0:
log.debug('{}: no experimental structures to download'.format(self.id))
return
downloaded_pdb_ids = []
for s in self.get_experimental_structures():
log.debug('{}: downloading structure file'.format(s.id))
s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
downloaded_pdb_ids.append(s.id)
return downloaded_pdb_ids | Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info |
13,098 | def superclasses(self, inherited=False):
data = clips.data.DataObject(self._env)
lib.EnvClassSuperclasses(
self._env, self._cls, data.byref, int(inherited))
for klass in classes(self._env, data.value):
yield klass | Iterate over the superclasses of the class.
This function is the Python equivalent
of the CLIPS class-superclasses command. |
13,099 | def pngout(ext_args):
args = _PNGOUT_ARGS + [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _PNG_FORMAT | Run the external program pngout on the file. |