def get_labels(self, depth=None):
"""
Returns a list of labels created by this reference.
Parameters
----------
depth : integer or ``None``
If not ``None``, defines from how many reference levels to
retrieve labels from.
Returns
-------
out : list of ``Label``
List containing the labels in this cell and its references.
"""
if not isinstance(self.ref_cell, Cell):
return []
if self.rotation is not None:
ct = numpy.cos(self.rotation * numpy.pi / 180.0)
st = numpy.sin(self.rotation * numpy.pi / 180.0)
st = numpy.array([-st, st])
if self.magnification is not None:
mag = numpy.array([self.magnification, self.magnification])
if self.origin is not None:
orgn = numpy.array(self.origin)
if self.x_reflection:
xrefl = numpy.array([1, -1], dtype='int')
cell_labels = self.ref_cell.get_labels(depth=depth)
labels = []
for ii in range(self.columns):
for jj in range(self.rows):
spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])
for clbl in cell_labels:
lbl = libCopy.deepcopy(clbl)
if self.magnification:
lbl.position = lbl.position * mag + spc
else:
lbl.position = lbl.position + spc
if self.x_reflection:
lbl.position = lbl.position * xrefl
if self.rotation is not None:
lbl.position = lbl.position * ct + lbl.position[::-1] * st
if self.origin is not None:
lbl.position = lbl.position + orgn
labels.append(lbl)
return labels
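# A minimal sketch (not part of the original module) of the vectorised rotation used above:
# with st = [-sin(theta), sin(theta)], the expression `pos * ct + pos[::-1] * st`
# maps (x, y) to (x*cos - y*sin, y*cos + x*sin), i.e. a plain 2D rotation.
import numpy
theta = numpy.pi / 2
ct = numpy.cos(theta)
st = numpy.array([-numpy.sin(theta), numpy.sin(theta)])
pos = numpy.array([1.0, 0.0])
print(pos * ct + pos[::-1] * st)  # approximately [0., 1.]: (1, 0) rotated by 90 degrees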
def show_compatibility_message(self, message):
"""
Show compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle('Compatibility Check')
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def pop_configuration(self):
"""
Pops the currently active configuration from the stack of
configurations managed by this mapping.
:raises IndexError: If there is only one configuration in the stack.
"""
if len(self.__configurations) == 1:
raise IndexError('Can not pop the last configuration from the '
'stack of configurations.')
self.__configurations.pop()
self.__mapped_attr_cache.clear()
def get_plaintext_document_body(fpath, keep_layout=False):
"""Given a file-path to a full-text, return a list of unicode strings
whereby each string is a line of the fulltext.
In the case of a plain-text document, this simply means reading the
contents in from the file. In the case of a PDF however,
this means converting the document to plaintext.
It raises UnknownDocumentTypeError if the document is not a PDF or
plain text.
@param fpath: (string) - the path to the fulltext file
@return: (list) of strings - each string being a line in the document.
"""
textbody = []
mime_type = magic.from_file(fpath, mime=True)
if mime_type == "text/plain":
with open(fpath, "r") as f:
textbody = [line.decode("utf-8") for line in f.readlines()]
elif mime_type == "application/pdf":
textbody = convert_PDF_to_plaintext(fpath, keep_layout)
else:
raise UnknownDocumentTypeError(mime_type)
return textbody
def unsubscribe_from_candles(self, pair, timeframe=None, **kwargs):
"""Unsubscribe to the passed pair's OHLC data channel.
:param timeframe: str, {1m, 5m, 15m, 30m, 1h, 3h, 6h, 12h,
1D, 7D, 14D, 1M}
:param kwargs:
:return:
"""
valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',
'7D', '14D', '1M']
if timeframe:
if timeframe not in valid_tfs:
raise ValueError("timeframe must be any of %s" % valid_tfs)
else:
timeframe = '1m'
identifier = ('candles', pair, timeframe)
pair = 't' + pair if not pair.startswith('t') else pair
key = 'trade:' + timeframe + ':' + pair
self._unsubscribe('candles', identifier, key=key, **kwargs)
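# Hypothetical usage sketch: ``wss`` stands for a connected client instance exposing the
# method above; the pair and timeframe values are placeholders.
wss.unsubscribe_from_candles('BTCUSD', timeframe='1m')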
def _invert(self):
"""
Invert coverage data from {test_context: {file: line}}
to {file: {test_context: line}}
"""
result = defaultdict(dict)
for test_context, src_context in six.iteritems(self.data):
for src, lines in six.iteritems(src_context):
result[src][test_context] = lines
return result
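# Standalone sketch of the same inversion over plain dicts (the method above iterates
# self.data with six.iteritems for Python 2/3 compatibility):
from collections import defaultdict
coverage = {"test_a": {"pkg/mod.py": [1, 2]}, "test_b": {"pkg/mod.py": [2, 3]}}
inverted = defaultdict(dict)
for test_context, src_context in coverage.items():
    for src, lines in src_context.items():
        inverted[src][test_context] = lines
# inverted == {"pkg/mod.py": {"test_a": [1, 2], "test_b": [2, 3]}}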
def decompress(databasepath, database_name, compression, compressed_file):
"""
Decompress the provided file using the appropriate library
:param databasepath: Name and path of where the database files are to be downloaded
:param database_name: Name of the database e.g. sipprverse
:param compression: STR MOB-suite databases are .zip files, while OLC databases are .tar.gz
:param compressed_file: Compressed file to process
"""
# Extract the databases from the archives
if os.path.isfile(compressed_file):
if compression == 'tar':
logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
with tarfile.open(compressed_file, 'r') as tar:
# Decompress the archive
tar.extractall(path=databasepath)
elif compression == 'gz':
with gzip.open(compressed_file, 'rb') as gz:
file_name = os.path.basename(os.path.splitext(compressed_file)[0])
output_file = os.path.join(databasepath,
database_name,
file_name)
logging.info('Extracting {file_name} from archives'.format(file_name=file_name))
with open(output_file, 'wb') as output:
shutil.copyfileobj(gz, output)
else:
logging.info('Extracting {dbname} from archives'.format(dbname=database_name))
with zipfile.ZipFile(compressed_file, 'r') as zip_file:
zip_file.extractall(path=databasepath)
# Delete the archive file
os.remove(compressed_file)
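# Hypothetical invocation of decompress(); the paths and names below are placeholders
# and the archive is assumed to already exist on disk:
decompress(databasepath='/data/databases',
           database_name='sipprverse',
           compression='gz',
           compressed_file='/data/databases/sipprverse/profiles.txt.gz')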
def annual_heating_design_day_990(self):
"""A design day object representing the annual 99.0% heating design day."""
if bool(self._winter_des_day_dict) is True:
return DesignDay.from_ashrae_dict_heating(
self._winter_des_day_dict, self.location, True,
self._stand_press_at_elev)
else:
return None
def delete_namespaced_limit_range(self, name, namespace, **kwargs):
"""
delete a LimitRange
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_limit_range(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the LimitRange (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_limit_range_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_limit_range_with_http_info(name, namespace, **kwargs)
return data
def get_mockup_motor(self, motor):
""" Gets the equivalent :class:`~pypot.primitive.primitive.MockupMotor`. """
return next((m for m in self.robot.motors if m.name == motor.name), None)
def run_container(image,
name=None,
skip_translate=None,
ignore_collisions=False,
validate_ip_addrs=True,
client_timeout=salt.utils.docker.CLIENT_TIMEOUT,
bg=False,
replace=False,
force=False,
networks=None,
**kwargs):
'''
.. versionadded:: 2018.3.0
Equivalent to ``docker run`` on the Docker CLI. Runs the container, waits
for it to exit, and returns the container's logs when complete.
.. note::
Not to be confused with :py:func:`docker.run
<salt.modules.dockermod.run>`, which provides a :py:func:`cmd.run
<salt.modules.cmdmod.run>`-like interface for executing commands in a
running container.
This function accepts the same arguments as :py:func:`docker.create
<salt.modules.dockermod.create>`, with the exception of ``start``. In
addition, it accepts the arguments from :py:func:`docker.logs
<salt.modules.dockermod.logs>`, with the exception of ``follow``, to
control how logs are returned. Finally, the ``bg`` argument described below
can be used to optionally run the container in the background (the default
behavior is to block until the container exits).
bg : False
If ``True``, this function will not wait for the container to exit and
will not return its logs. It will however return the container's name
and ID, allowing for :py:func:`docker.logs
<salt.modules.dockermod.logs>` to be used to view the logs.
.. note::
The logs will be inaccessible once the container exits if
``auto_remove`` is set to ``True``, so keep this in mind.
replace : False
If ``True``, and if the named container already exists, this will
remove the existing container. The default behavior is to return a
``False`` result when the container already exists.
force : False
If ``True``, and the named container already exists, *and* ``replace``
is also set to ``True``, then the container will be forcibly removed.
Otherwise, the state will not proceed and will return a ``False``
result.
networks
Networks to which the container should be connected. If automatic IP
configuration is being used, the networks can be a simple list of
network names. If custom IP configuration is being used, then this
argument must be passed as a dictionary.
CLI Examples:
.. code-block:: bash
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh
# Run container in the background
salt myminion docker.run_container myuser/myimage command=/usr/local/bin/myscript.sh bg=True
# Connecting to two networks using automatic IP configuration
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks=net1,net2
# net1 using automatic IP, net2 using static IPv4 address
salt myminion docker.run_container myuser/myimage command='perl /scripts/sync.py' networks='{"net1": {}, "net2": {"ipv4_address": "192.168.27.12"}}'
'''
if kwargs.pop('inspect', True) and not resolve_image_id(image):
pull(image, client_timeout=client_timeout)
removed_ids = None
if name is not None:
try:
pre_state = __salt__['docker.state'](name)
except CommandExecutionError:
pass
else:
if pre_state == 'running' and not (replace and force):
raise CommandExecutionError(
'Container \'{0}\' exists and is running. Run with '
'replace=True and force=True to force removal of the '
'existing container.'.format(name)
)
elif not replace:
raise CommandExecutionError(
'Container \'{0}\' exists. Run with replace=True to '
'remove the existing container'.format(name)
)
else:
# We don't have to try/except this, we want it to raise a
# CommandExecutionError if we fail to remove the existing
# container so that we gracefully abort before attempting to go
# any further.
removed_ids = rm_(name, force=force)
log_kwargs = {}
for argname in get_client_args('logs')['logs']:
try:
log_kwargs[argname] = kwargs.pop(argname)
except KeyError:
pass
# Ignore the stream argument if passed
log_kwargs.pop('stream', None)
kwargs, unused_kwargs = _get_create_kwargs(
skip_translate=skip_translate,
ignore_collisions=ignore_collisions,
validate_ip_addrs=validate_ip_addrs,
**kwargs)
# _get_create_kwargs() will have processed auto_remove and put it into the
# host_config, so check the host_config to see whether or not auto_remove
# was enabled.
auto_remove = kwargs.get('host_config', {}).get('AutoRemove', False)
if unused_kwargs:
log.warning(
'The following arguments were ignored because they are not '
'recognized by docker-py: %s', sorted(unused_kwargs)
)
if networks:
if isinstance(networks, six.string_types):
networks = {x: {} for x in networks.split(',')}
if not isinstance(networks, dict) \
or not all(isinstance(x, dict)
for x in six.itervalues(networks)):
raise SaltInvocationError('Invalid format for networks argument')
log.debug(
'docker.create: creating container %susing the following '
'arguments: %s',
'with name \'{0}\' '.format(name) if name is not None else '',
kwargs
)
time_started = time.time()
# Create the container
ret = _client_wrapper('create_container', image, name=name, **kwargs)
if removed_ids:
ret['Replaces'] = removed_ids
if name is None:
name = inspect_container(ret['Id'])['Name'].lstrip('/')
ret['Name'] = name
def _append_warning(ret, msg):
warnings = ret.pop('Warnings', None)
if warnings is None:
warnings = [msg]
elif isinstance(ret, list):
warnings.append(msg)
else:
warnings = [warnings, msg]
ret['Warnings'] = warnings
exc_info = {'return': ret}
try:
if networks:
try:
for net_name, net_conf in six.iteritems(networks):
__salt__['docker.connect_container_to_network'](
ret['Id'],
net_name,
**net_conf)
except CommandExecutionError as exc:
# Make an effort to remove the container if auto_remove was
# enabled
if auto_remove:
try:
rm_(name)
except CommandExecutionError as rm_exc:
exc_info.setdefault('other_errors', []).append(
'Failed to auto_remove container: {0}'.format(
rm_exc)
)
# Raise original exception with additonal info
raise CommandExecutionError(exc.__str__(), info=exc_info)
# Start the container
output = []
start_(ret['Id'])
if not bg:
# Can't use logs() here because we've disabled "stream" in that
# function. Also, note that if you want to troubleshoot this for loop
# in a debugger like pdb or pudb, you'll want to use auto_remove=False
# when running the function, since the container will likely exit
# before you finish stepping through with a debugger. If the container
# exits during iteration, the next iteration of the generator will
# raise an exception since the container will no longer exist.
try:
for line in _client_wrapper('logs',
ret['Id'],
stream=True,
timestamps=False):
output.append(salt.utils.stringutils.to_unicode(line))
except CommandExecutionError:
msg = (
'Failed to get logs from container. This may be because '
'the container exited before Salt was able to attach to '
'it to retrieve the logs. Consider setting auto_remove '
'to False.'
)
_append_warning(ret, msg)
# Container has exited, note the elapsed time
ret['Time_Elapsed'] = time.time() - time_started
_clear_context()
if not bg:
ret['Logs'] = ''.join(output)
if not auto_remove:
try:
cinfo = inspect_container(ret['Id'])
except CommandExecutionError:
_append_warning(
ret, 'Failed to inspect container after running')
else:
cstate = cinfo.get('State', {})
cstatus = cstate.get('Status')
if cstatus != 'exited':
_append_warning(
ret, 'Container state is not \'exited\'')
ret['ExitCode'] = cstate.get('ExitCode')
except CommandExecutionError as exc:
try:
exc_info.update(exc.info)
except (TypeError, ValueError):
# In the event exc.info wasn't a dict (extremely unlikely), append
# it to other_errors as a fallback.
exc_info.setdefault('other_errors', []).append(exc.info)
# Re-raise with all of the available additional info
raise CommandExecutionError(exc.__str__(), info=exc_info)
return ret
def _dict_subset(keys, master_dict):
'''
Return a dictionary of only the subset of keys/values specified in keys
'''
return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
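# Example of the helper above (values chosen purely for illustration):
interfaces = {'eth0': '10.0.0.1', 'eth1': '10.0.0.2', 'lo': '127.0.0.1'}
print(_dict_subset(['eth0', 'lo'], interfaces))  # {'eth0': '10.0.0.1', 'lo': '127.0.0.1'}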
def commit_transaction(self):
"""Commit a multi-statement transaction.
.. versionadded:: 3.7
"""
self._check_ended()
retry = False
state = self._transaction.state
if state is _TxnState.NONE:
raise InvalidOperation("No transaction started")
elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
# Server transaction was never started, no need to send a command.
self._transaction.state = _TxnState.COMMITTED_EMPTY
return
elif state is _TxnState.ABORTED:
raise InvalidOperation(
"Cannot call commitTransaction after calling abortTransaction")
elif state is _TxnState.COMMITTED:
# We're explicitly retrying the commit, move the state back to
# "in progress" so that _in_transaction returns true.
self._transaction.state = _TxnState.IN_PROGRESS
retry = True
try:
self._finish_transaction_with_retry("commitTransaction", retry)
except ConnectionFailure as exc:
# We do not know if the commit was successfully applied on the
# server or if it satisfied the provided write concern, set the
# unknown commit error label.
exc._remove_error_label("TransientTransactionError")
_reraise_with_unknown_commit(exc)
except WTimeoutError as exc:
# We do not know if the commit has satisfied the provided write
# concern, add the unknown commit error label.
_reraise_with_unknown_commit(exc)
except OperationFailure as exc:
if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
# The server reports errorLabels in the case.
raise
# We do not know if the commit was successfully applied on the
# server or if it satisfied the provided write concern, set the
# unknown commit error label.
_reraise_with_unknown_commit(exc)
finally:
self._transaction.state = _TxnState.COMMITTED
def next(self):
"""Next point in iteration
"""
x, y = next(self.scan)
xr = -x if self.rx else x
yr = -y if self.ry else y
return xr, yr
def hostinterface_update(interfaceid, **kwargs):
'''
.. versionadded:: 2016.3.0
Update host interface
.. note::
This function accepts all standard hostinterface: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/hostinterface/object#host_interface
:param interfaceid: ID of the hostinterface to update
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: ID of the updated host interface, False on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.hostinterface_update 6 ip_=0.0.0.2
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'hostinterface.update'
params = {"interfaceid": interfaceid}
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result']['interfaceids']
else:
raise KeyError
except KeyError:
return ret
def masks(list_of_index_lists, n):
"""Make an array in which rows store 1d mask arrays from list of index lists.
Parameters
----------
n : int
Maximal index / number of samples.
"""
# make a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for il,l in enumerate(list_of_index_lists):
mask = np.zeros(n,dtype=bool)
mask[l] = True
list_of_index_lists[il] = mask
# convert to arrays
masks = np.array(list_of_index_lists)
return masks
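# Quick illustration of masks(); note that the input lists are also overwritten in place:
import numpy as np
index_lists = [[0, 2], [1, 3, 4]]
m = masks(index_lists, 5)
# m.shape == (2, 5)
# m[0] -> array([ True, False,  True, False, False])
# m[1] -> array([False,  True, False,  True,  True])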
def list_open_buffers(self):
"""
Return an `OpenBufferInfo` list that gives information about the
open buffers.
"""
active_eb = self.active_editor_buffer
visible_ebs = self.active_tab.visible_editor_buffers()
def make_info(i, eb):
return OpenBufferInfo(
index=i,
editor_buffer=eb,
is_active=(eb == active_eb),
is_visible=(eb in visible_ebs))
return [make_info(i, eb) for i, eb in enumerate(self.editor_buffers)]
async def create_new_pump_async(self, partition_id, lease):
"""
Create a new pump thread with a given lease.
:param partition_id: The partition ID.
:type partition_id: str
:param lease: The lease to be used.
:type lease: ~azure.eventprocessorhost.lease.Lease
"""
loop = asyncio.get_event_loop()
partition_pump = EventHubPartitionPump(self.host, lease)
# Do the put after start, if the start fails then put doesn't happen
loop.create_task(partition_pump.open_async())
self.partition_pumps[partition_id] = partition_pump
_logger.info("Created new partition pump %r %r", self.host.guid, partition_id)
def main():
"""Generate a TPIP report."""
parser = argparse.ArgumentParser(description='Generate a TPIP report as a CSV file.')
parser.add_argument('output_filename', type=str, metavar='output-file',
help='the output path and filename', nargs='?')
parser.add_argument('--only', type=str, help='only parse this package')
args = parser.parse_args()
output_path = os.path.abspath(args.output_filename) if args.output_filename else None
skips = []
tpip_pkgs = []
for pkg_name, pkg_item in sorted(pkg_resources.working_set.by_key.items()):
if args.only and args.only not in pkg_name.lower():
continue
if pkg_name in EXCLUDED_PACKAGES:
skips.append(pkg_name)
continue
metadata_lines = get_metadata(pkg_item)
tpip_pkg = process_metadata(pkg_name, metadata_lines)
tpip_pkgs.append(force_ascii_values(tpip_pkg))
print(json.dumps(tpip_pkgs, indent=2, sort_keys=True))
print('Parsed %s packages\nOutput to CSV: `%s`\nIgnored packages: %s' % (
len(tpip_pkgs),
output_path,
', '.join(skips),
))
output_path and write_csv_file(output_path, tpip_pkgs)
def _get_user_class(self, name):
"""Get or create a user class of the given type."""
self._user_classes.setdefault(name, _make_user_class(self, name))
return self._user_classes[name]
def get_coordination_symmetry_measures_optim(self, only_minimum=True,
all_csms=True, nb_set=None, optimization=None):
"""
Returns the continuous symmetry measures of the current local geometry in a dictionary.
:return: the continuous symmetry measures of the current local geometry in a dictionary.
"""
cn = len(self.local_geometry.coords)
test_geometries = self.allcg.get_implemented_geometries(cn)
if all([cg.algorithms[0].algorithm_type == EXPLICIT_PERMUTATIONS for cg in test_geometries]):
return self.get_coordination_symmetry_measures(only_minimum=only_minimum, all_csms=all_csms,
optimization=optimization)
if not all([all([algo.algorithm_type == SEPARATION_PLANE
for algo in cg.algorithms]) for cg in test_geometries]):
raise ValueError('All algorithms should be EXPLICIT_PERMUTATIONS or SEPARATION_PLANE')
result_dict = {}
for geometry in test_geometries:
self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry,
centering_type=self.centering_type,
include_central_site_in_centroid=
self.include_central_site_in_centroid)
points_perfect = self.perfect_geometry.points_wcs_ctwcc()
cgsm = self.coordination_geometry_symmetry_measures_sepplane_optim(geometry,
points_perfect=points_perfect,
nb_set=nb_set,
optimization=optimization)
result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm
if only_minimum:
if len(result) > 0:
imin = np.argmin([rr['symmetry_measure'] for rr in result])
if geometry.algorithms is not None:
algo = algos[imin]
else:
algo = algos
result_dict[geometry.mp_symbol] = {'csm': result[imin]['symmetry_measure'],
'indices': permutations[
imin],
'algo': algo,
'local2perfect_map':
local2perfect_maps[
imin],
'perfect2local_map':
perfect2local_maps[
imin],
'scaling_factor': 1.0 / result[imin]['scaling_factor'],
'rotation_matrix':
np.linalg.inv(result[imin]['rotation_matrix']),
'translation_vector': result[imin]['translation_vector']}
if all_csms:
self._update_results_all_csms(result_dict, permutations, imin, geometry)
return result_dict
def check_exports(mod, specs, renamings):
'''
Does nothing but raise a PythranSyntaxError if the specs
reference an undefined global.
'''
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
mod_functions = {node.name: node for node in mod.body
if isinstance(node, ast.FunctionDef)}
for fname, signatures in functions.items():
try:
fnode = mod_functions[fname]
except KeyError:
raise PythranSyntaxError(
"Invalid spec: exporting undefined function `{}`"
.format(fname))
for signature in signatures:
args_count = len(fnode.args.args)
if len(signature) > args_count:
raise PythranSyntaxError(
"Too many arguments when exporting `{}`"
.format(fname))
elif len(signature) < args_count - len(fnode.args.defaults):
raise PythranSyntaxError(
"Not enough arguments when exporting `{}`"
.format(fname))
def seek(self, rev):
"""Arrange the caches to help look up the given revision."""
# TODO: binary search? Perhaps only when one or the other
# stack is very large?
if not self:
return
if type(rev) is not int:
raise TypeError("rev must be int")
past = self._past
future = self._future
if future:
appender = past.append
popper = future.pop
future_start = future[-1][0]
while future_start <= rev:
appender(popper())
if future:
future_start = future[-1][0]
else:
break
if past:
popper = past.pop
appender = future.append
past_end = past[-1][0]
while past_end > rev:
appender(popper())
if past:
past_end = past[-1][0]
else:
break
def deleteFeatures(self,
objectIds="",
where="",
geometryFilter=None,
gdbVersion=None,
rollbackOnFailure=True
):
""" removes 1:n features based on a sql statement
Input:
objectIds - The object IDs of this layer/table to be deleted
where - A where clause for the query filter. Any legal SQL
where clause operating on the fields in the layer is
allowed. Features conforming to the specified where
clause will be deleted.
geometryFilter - a filters.GeometryFilter object to limit
deletion by a geometry.
gdbVersion - Geodatabase version to apply the edits. This
parameter applies only if the isDataVersioned
property of the layer is true
rollbackOnFailure - parameter to specify if the edits should
be applied only if all submitted edits
succeed. If false, the server will apply
the edits that succeed even if some of
the submitted edits fail. If true, the
server will apply the edits only if all
edits succeed. The default value is true.
Output:
JSON response as dictionary
"""
dURL = self._url + "/deleteFeatures"
params = {
"f": "json",
'rollbackOnFailure' : rollbackOnFailure
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if geometryFilter is not None and \
isinstance(geometryFilter, filters.GeometryFilter):
gfilter = geometryFilter.filter
params['geometry'] = gfilter['geometry']
params['geometryType'] = gfilter['geometryType']
params['inSR'] = gfilter['inSR']
params['spatialRel'] = gfilter['spatialRel']
if where is not None and \
where != "":
params['where'] = where
if objectIds is not None and \
objectIds != "":
params['objectIds'] = objectIds
result = self._post(url=dURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
self.__init()
return result
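# Hypothetical call sketch: ``layer`` stands for an ArcGIS REST feature layer object
# exposing the method above; the where clause is a placeholder.
result = layer.deleteFeatures(where="STATUS = 'retired'", rollbackOnFailure=True)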
def add_user(
self, user,
first_name=None, last_name=None,
email=None, password=None
):
"""
Add a new user.
Args:
user (string): User name.
first_name (optional[string]): User's first name. Defaults to None.
last_name (optional[string]): User's last name. Defaults to None.
email: (optional[string]): User's email address. Defaults to None.
password: (optional[string]): User's password. Defaults to None.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
self.project_service.add_user(
user, first_name, last_name, email, password)
def get_coordinate_systems(
self, token: dict = None, srs_code: str = None, prot: str = "https"
) -> dict:
"""Get available coordinate systems in Isogeo API.
:param str token: API auth token
:param str srs_code: code of a specific coordinate system
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
"""
# if specific format
if isinstance(srs_code, str):
specific_srs = "/{}".format(srs_code)
else:
specific_srs = ""
# search request
req_url = "{}://v1.{}.isogeo.com/coordinate-systems{}".format(
prot, self.api_url, specific_srs
)
req = self.get(
req_url, headers=self.header, proxies=self.proxies, verify=self.ssl
)
# checking response
checker.check_api_response(req)
# end of method
return req.json()
def _autoinsert_quotes(self, key):
"""Control how to automatically insert quotes in various situations."""
char = {Qt.Key_QuoteDbl: '"', Qt.Key_Apostrophe: '\''}[key]
line_text = self.editor.get_text('sol', 'eol')
line_to_cursor = self.editor.get_text('sol', 'cursor')
cursor = self.editor.textCursor()
last_three = self.editor.get_text('sol', 'cursor')[-3:]
last_two = self.editor.get_text('sol', 'cursor')[-2:]
trailing_text = self.editor.get_text('cursor', 'eol').strip()
if self.editor.has_selected_text():
text = self.editor.get_selected_text()
self.editor.insert_text("{0}{1}{0}".format(char, text))
# keep text selected, for inserting multiple quotes
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1)
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
len(text))
self.editor.setTextCursor(cursor)
elif self.editor.in_comment():
self.editor.insert_text(char)
elif (len(trailing_text) > 0 and
not unmatched_quotes_in_line(line_to_cursor) == char and
not trailing_text[0] in (',', ':', ';', ')', ']', '}')):
self.editor.insert_text(char)
elif (unmatched_quotes_in_line(line_text) and
(not last_three == 3*char)):
self.editor.insert_text(char)
# Move to the right if we are before a quote
elif self.editor.next_char() == char:
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor, 1)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# Automatic insertion of triple double quotes (for docstrings)
elif last_three == 3*char:
self.editor.insert_text(3*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor, 3)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# If last two chars are quotes, just insert one more because most
# probably the user wants to write a docstring
elif last_two == 2*char:
self.editor.insert_text(char)
self.editor.delayed_popup_docstring()
# Automatic insertion of quotes
else:
self.editor.insert_text(2*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter)
self.editor.setTextCursor(cursor)
def run_model(self,
op_list,
num_steps,
feed_vars=(),
feed_data=None,
print_every=100,
allow_initialize=True):
"""Runs `op_list` for `num_steps`.
Args:
op_list: A list of ops to run.
num_steps: Number of steps to run this for. If feeds are used, this is a
maximum. `None` can be used to signal "forever".
feed_vars: The variables to feed.
feed_data: An iterator that feeds data tuples.
print_every: Print a log line and checkpoint every so many steps.
allow_initialize: If True, the model will be initialized if any variable
is uninitialized, if False the model will not be initialized.
Returns:
The final run result as a list.
Raises:
ValueError: If feed_data doesn't match feed_vars.
"""
feed_data = feed_data or itertools.repeat(())
ops = [bookkeeper.global_step()]
ops.extend(op_list)
sess = tf.get_default_session()
self.prepare_model(sess, allow_initialize=allow_initialize)
results = []
try:
if num_steps is None:
counter = itertools.count(0)
elif num_steps >= 0:
counter = xrange(num_steps)
else:
raise ValueError('num_steps cannot be negative: %s' % num_steps)
for i, data in zip(counter, feed_data):
log_this_time = print_every and i % print_every == 0
if len(data) != len(feed_vars):
raise ValueError(
'feed_data and feed_vars must be the same length: %d vs %d' % (
len(data), len(feed_vars)))
if self._coord.should_stop():
print('Coordinator stopped')
sys.stdout.flush()
self.stop_queues()
break
if len(feed_vars) != len(data):
raise ValueError('Feed vars must be the same length as data.')
if log_this_time and self._summary_writer:
results = sess.run(ops + [self._summaries],
dict(zip(feed_vars, data)))
self._summary_writer.add_summary(results[-1], results[0])
results = results[:-1]
else:
results = sess.run(ops, dict(zip(feed_vars, data)))
if log_this_time:
self._log_and_save(sess, results)
# Print the last line if it wasn't just printed
if print_every and not log_this_time:
self._log_and_save(sess, results)
except tf.errors.OutOfRangeError as ex:
print('Done training -- epoch limit reached %s' % ex.message)
sys.stdout.flush()
self.stop_queues()
except BaseException as ex:
print('Exception -- stopping threads: %s' % ex, file=sys.stderr)
sys.stdout.flush()
self.stop_queues()
raise
return results
def set_cmap(self, cmap, callback=True):
"""
Set the color map used by this RGBMapper.
`cmap` specifies a ColorMap object. If `callback` is True, then
any callbacks associated with this change will be invoked.
"""
self.cmap = cmap
with self.suppress_changed:
self.calc_cmap()
# TEMP: ignore passed callback parameter
# callback=False in the following because we don't want to
# recursively invoke set_cmap()
self.t_.set(color_map=cmap.name, callback=False)
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
"""
:param str text: input text
:param str ignore_chars: characters to exclude from the count
:return: float, percentage of characters in the text that are Thai characters
"""
if not text or not isinstance(text, str):
return 0
if not ignore_chars:
ignore_chars = ""
num_thai = 0
num_ignore = 0
for ch in text:
if ch in ignore_chars:
num_ignore += 1
elif isthaichar(ch):
num_thai += 1
num_count = len(text) - num_ignore
return (num_thai / num_count) * 100
def string2identifier(s):
"""Turn a string into a valid python identifier.
Currently only allows ASCII letters and underscore. Illegal characters
are replaced with underscore. This is slightly more opinionated than
python 3 itself, and may be refactored in future (see PEP 3131).
Parameters
----------
s : string
string to convert
Returns
-------
str
valid python identifier.
"""
# https://docs.python.org/3/reference/lexical_analysis.html#identifiers
# https://www.python.org/dev/peps/pep-3131/
if len(s) == 0:
return "_"
if s[0] not in string.ascii_letters:
s = "_" + s
valids = string.ascii_letters + string.digits + "_"
out = ""
for i, char in enumerate(s):
if char in valids:
out += char
else:
out += "_"
return out
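# Example of the conversion above (outputs follow directly from the rules in the code):
print(string2identifier("2nd-pass result"))  # -> "_2nd_pass_result"
print(string2identifier(""))                 # -> "_"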
def quantile(data, num_breaks):
"""
Calculate quantile breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform.
"""
def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()):
""" function copied from scipy 0.13.3::scipy.stats.mstats.mquantiles """
def _quantiles1D(data,m,p):
x = numpy.sort(data.compressed())
n = len(x)
if n == 0:
return numpy.ma.array(numpy.empty(len(p), dtype=float), mask=True)
elif n == 1:
return numpy.ma.array(numpy.resize(x, p.shape), mask=numpy.ma.nomask)
aleph = (n*p + m)
k = numpy.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
# Initialization & checks ---------
data = numpy.ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
#
if limit:
condition = (limit[0] < data) & (data < limit[1])
data[~condition.filled(True)] = numpy.ma.masked
#
p = numpy.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return numpy.ma.apply_along_axis(_quantiles1D, axis, data, m, p)
return scipy_mquantiles(data, numpy.linspace(1.0 / num_breaks, 1, num_breaks))
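# Illustrative use of quantile(); with four breaks the probabilities passed to the
# mquantiles copy are 0.25, 0.5, 0.75 and 1.0:
import numpy
values = numpy.arange(1, 101)   # 1..100
breaks = quantile(values, 4)    # roughly the 25th, 50th, 75th and 100th percentile values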
def visit_AugAssign(self, node):
"""
An AugAssigned value depends on the r-value's type dependencies.
This also holds for subscripts: `a[i] += foo()` means the type of `a` depends on
the return type of `foo` as well as the previous types of `a`.
"""
args = (self.naming[get_variable(node.target).id],
self.visit(node.value))
merge_dep = list({frozenset.union(*x)
for x in itertools.product(*args)})
self.naming[get_variable(node.target).id] = merge_dep
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_vlan_disc_req(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_rx_vlan_disc_req = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-vlan-disc-req")
fcoe_intf_rx_vlan_disc_req.text = kwargs.pop('fcoe_intf_rx_vlan_disc_req')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def readFLOAT16(self):
""" Read a 2 byte float """
self.reset_bits_pending()
word = self.readUI16()
sign = -1 if ((word & 0x8000) != 0) else 1
exponent = (word >> 10) & 0x1f
significand = word & 0x3ff
if exponent == 0:
if significand == 0:
return 0.0
else:
return sign * math.pow(2, 1 - SWFStream.FLOAT16_EXPONENT_BASE) * (significand / 1024.0)
if exponent == 31:
if significand == 0:
return float('-inf') if sign < 0 else float('inf')
else:
return float('nan')
# normal number
return sign * math.pow(2, exponent - SWFStream.FLOAT16_EXPONENT_BASE) * (1 + significand / 1024.0)
def _configure_io_handler(self, handler):
"""Register an io-handler at the polling object."""
if self.check_events():
return
if handler in self._unprepared_handlers:
old_fileno = self._unprepared_handlers[handler]
prepared = self._prepare_io_handler(handler)
else:
old_fileno = None
prepared = True
fileno = handler.fileno()
if old_fileno is not None and fileno != old_fileno:
del self._handlers[old_fileno]
try:
self.poll.unregister(old_fileno)
except KeyError:
# The socket has changed, but the old one isn't registered,
# e.g. ``prepare`` wants to connect again
pass
if not prepared:
self._unprepared_handlers[handler] = fileno
if not fileno:
return
self._handlers[fileno] = handler
events = 0
if handler.is_readable():
logger.debug(" {0!r} readable".format(handler))
events |= select.POLLIN
if handler.is_writable():
logger.debug(" {0!r} writable".format(handler))
events |= select.POLLOUT
if events:
logger.debug(" registering {0!r} handler fileno {1} for"
" events {2}".format(handler, fileno, events))
self.poll.register(fileno, events)
def search(self, pattern, minAddr = None, maxAddr = None):
"""
Search for the given pattern within the process memory.
@type pattern: str, compat.unicode or L{Pattern}
@param pattern: Pattern to search for.
It may be a byte string, a Unicode string, or an instance of
L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory.
"""
if isinstance(pattern, str):
return self.search_bytes(pattern, minAddr, maxAddr)
if isinstance(pattern, compat.unicode):
return self.search_bytes(pattern.encode("utf-16le"),
minAddr, maxAddr)
if isinstance(pattern, Pattern):
return Search.search_process(self, pattern, minAddr, maxAddr)
raise TypeError("Unknown pattern type: %r" % type(pattern))
def url2domain(url):
""" extract domain from url
"""
parsed_uri = urlparse.urlparse(url)
domain = '{uri.netloc}'.format(uri=parsed_uri)
domain = re.sub("^.+@", "", domain)
domain = re.sub(":.+$", "", domain)
return domain
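# Example of the helper above (the code assumes the Python 2 style `import urlparse`):
print(url2domain("https://user@www.example.com:8080/path?q=1"))  # -> www.example.com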
def _detect_xerial_stream(payload):
"""Detects if the data given might have been encoded with the blocking mode
of the xerial snappy library.
This mode writes a magic header of the format:
+--------+--------------+------------+---------+--------+
| Marker | Magic String | Null / Pad | Version | Compat |
+--------+--------------+------------+---------+--------+
| byte | c-string | byte | int32 | int32 |
+--------+--------------+------------+---------+--------+
| -126 | 'SNAPPY' | \0 | | |
+--------+--------------+------------+---------+--------+
The pad appears to be to ensure that SNAPPY is a valid cstring
The version is the version of this format as written by xerial,
in the wild this is currently 1 as such we only support v1.
Compat is there to claim the minimum supported version that
can read a xerial block stream, presently in the wild this is
1.
"""
if len(payload) > 16:
header = struct.unpack('!' + _XERIAL_V1_FORMAT, bytes(payload)[:16])
return header == _XERIAL_V1_HEADER
return False
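# Sketch of a payload matching the layout documented above. The concrete constant values
# here are assumptions derived from that table (marker -126, "SNAPPY", pad 0, version 1,
# compat 1), not necessarily the module's own _XERIAL_V1_FORMAT / _XERIAL_V1_HEADER:
import struct
fmt = 'bccccccBii'  # marker, 6 chars, pad byte, version int32, compat int32 -> 16 bytes
header = struct.pack('!' + fmt, -126, b'S', b'N', b'A', b'P', b'P', b'Y', 0, 1, 1)
payload = header + b'<snappy blocks would follow here>'
# _detect_xerial_stream(payload) returns True when the module constants agree with `fmt`.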
def queries(self, request):
'''Multiple Database Queries'''
queries = self.get_queries(request)
worlds = []
with self.mapper.begin() as session:
for _ in range(queries):
world = session.query(World).get(randint(1, MAXINT))
worlds.append(self.get_json(world))
return Json(worlds).http_response(request)
def read(self, auth, resource, options, defer=False):
""" Read value(s) from a dataport.
Calls a function that builds a request to read the dataport specified by an alias or rid
and returns timeseries data as defined by the options.
Args:
auth: Takes the device cik
resource: Takes the dataport alias or rid.
options: Takes a list of options for what to return.
"""
return self._call('read', auth, [resource, options], defer)
def get_pic(self, playingsong, tempfile_path):
'''Fetch the album cover art.'''
url = playingsong['picture'].replace('\\', '')
for _ in range(3):
try:
urllib.urlretrieve(url, tempfile_path)
logger.debug('Get cover art success!')
return True
except (IOError, urllib.ContentTooShortError):
pass
logger.error('Get cover art failed!')
return False
def get_balance(self):
"""Check the balance fot this account.
Returns a dictionary containing:
account_type: The account type
balance: The balance remaining on the account
currency: The currency used for the account balance. Assumed to be GBP if not set"""
xml_root = self.__init_xml('Balance')
response = clockwork_http.request(BALANCE_URL, etree.tostring(xml_root, encoding='utf-8'))
data_etree = etree.fromstring(response['data'])
err_desc = data_etree.find('ErrDesc')
if err_desc is not None:
raise clockwork_exceptions.ApiException(err_desc.text, data_etree.find('ErrNo').text)
result = {}
result['account_type'] = data_etree.find('AccountType').text
result['balance'] = data_etree.find('Balance').text
result['currency'] = data_etree.find('Currency').text
return result
def _get_padded(data, start, end):
"""Return `data[start:end]` filling in with zeros outside array bounds
Assumes that either `start<0` or `end>len(data)` but not both.
"""
if start < 0 and end > data.shape[0]:
raise RuntimeError()
if start < 0:
start_zeros = np.zeros((-start, data.shape[1]),
dtype=data.dtype)
return np.vstack((start_zeros, data[:end]))
elif end > data.shape[0]:
end_zeros = np.zeros((end - data.shape[0], data.shape[1]),
dtype=data.dtype)
return np.vstack((data[start:], end_zeros))
else:
return data[start:end]
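# Behaviour check for _get_padded(); the function expects a 2-D array:
import numpy as np
data = np.arange(12).reshape(6, 2)
print(_get_padded(data, -2, 3).shape)  # (5, 2): two zero rows prepended to rows 0..2
print(_get_padded(data, 4, 8).shape)   # (4, 2): rows 4..5 followed by two zero rows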
def _resolve_paths(self, *paths):
"""
Resolve paths into a set of filenames (no directories) to check.
External tools will handle directories as arguments differently, so for
consistency we just want to pass them filenames.
This method will recursively walk all directories and filter out
any paths that match self.options.ignores.
"""
result = set()
for path in paths:
if os.path.isdir(path):
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
path = os.path.join(dirpath, filename)
if path.startswith('.'):
path = path[1:].lstrip('/')
if not self._should_ignore(path):
result.add(path)
else:
result.add(path)
return result | Resolve paths into a set of filenames (no directories) to check.
External tools will handle directories as arguments differently, so for
consistency we just want to pass them filenames.
This method will recursively walk all directories and filter out
any paths that match self.options.ignores. |
def init(cls, path=None, mkdir=True, odbt=GitCmdObjectDB, expand_vars=True, **kwargs):
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:param mkdir:
if specified will create the repository directory if it doesn't
already exist. Creates the directory with a mode=0755.
Only effective if a path is explicitly given
:param odbt:
Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects.
It will be used to access all object data
:param expand_vars:
if specified, environment variables will not be escaped. This
can lead to information disclosure, allowing attackers to
access the contents of environment variables
:param kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo)"""
if path:
path = expand_path(path, expand_vars)
if mkdir and path and not osp.exists(path):
os.makedirs(path, 0o755)
# git command automatically chdir into the directory
git = Git(path)
git.init(**kwargs)
return cls(path, odbt=odbt) | Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:param mkdir:
if specified will create the repository directory if it doesn't
already exist. Creates the directory with a mode=0755.
Only effective if a path is explicitly given
:param odbt:
Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects.
It will be used to access all object data
:param expand_vars:
if specified, environment variables will not be escaped. This
can lead to information disclosure, allowing attackers to
access the contents of environment variables
:param kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo) |
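A minimal usage sketch for the method above, assuming GitPython is importable as git and the target path is illustrative:

import git

repo = git.Repo.init('/tmp/example_repo', mkdir=True)  # creates the directory and runs git-init
assert not repo.bare
repo.index.commit('initial empty commit')              # the returned Repo is immediately usable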
def exists(config):
"""
Check whether the .wily/ directory exists.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: Whether the .wily directory exists
:rtype: ``boolean``
"""
exists = (
pathlib.Path(config.cache_path).exists()
and pathlib.Path(config.cache_path).is_dir()
)
if not exists:
return False
index_path = pathlib.Path(config.cache_path) / "index.json"
if index_path.exists():
with open(index_path, "r") as out:
index = json.load(out)
if index["version"] != __version__:
# TODO: Inspect the versions properly.
logger.warning(
"Wily cache is old, you may incur errors until you rebuild the cache."
)
else:
logger.warning(
"Wily cache was not versioned, you may incur errors until you rebuild the cache."
)
create_index(config)
return True | Check whether the .wily/ directory exists.
:param config: The configuration
:type config: :class:`wily.config.WilyConfig`
:return: Whether the .wily directory exists
:rtype: ``boolean`` |
def DynamicCmd(name, plugins):
"""
Returns a cmd with the added plugins,
:param name: TODO:
:param plugins: list of plugins
"""
exec('class %s(cmd.Cmd):\n prompt="cm> "' % name)
plugin_objects = []
for plugin in plugins:
classprefix = plugin['class']
plugin_list = plugin['plugins']
plugin_objects = plugin_objects + \
load_plugins(classprefix, plugin_list)
exec_command = make_cmd_class(name, *plugin_objects)()
return (exec_command, plugin_objects) | Returns a cmd with the added plugins,
:param name: TODO:
:param plugins: list of plugins |
def get_hubs(self):
"""Get a list of hubs names.
Returns
-------
hubs : list
List of hub names
"""
# Use helm to get a list of hubs.
output = helm(
'list',
'-q'
)
# Check if an error occurred.
if output.returncode != 0:
print("Something went wrong!")
print(output.stderr)
else:
hubs = output.stdout.split()
return hubs | Get a list of hubs names.
Returns
-------
hubs : list
List of hub names |
def insert_file(self, file):
"""insert_file(file)
Load resources entries from FILE, and insert them into the
database. FILE can be a filename (a string) or a file object.
"""
if type(file) is bytes:
file = open(file, 'r')
self.insert_string(file.read()) | insert_file(file)
Load resources entries from FILE, and insert them into the
database. FILE can be a filename (a string) or a file object.
def set_dhw_on(self, until=None):
"""Sets the DHW on until a given time, or permanently."""
if until is None:
data = {"Mode": "PermanentOverride",
"State": "On",
"UntilTime": None}
else:
data = {"Mode": "TemporaryOverride",
"State": "On",
"UntilTime": until.strftime('%Y-%m-%dT%H:%M:%SZ')}
self._set_dhw(data) | Sets the DHW on until a given time, or permanently. |
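For illustration, assuming dhw is an instance of the hot-water zone class that defines this method:

from datetime import datetime, timedelta

dhw.set_dhw_on()                                              # permanent override
dhw.set_dhw_on(until=datetime.utcnow() + timedelta(hours=1))  # temporary boost for one hour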
async def _upload_chunks(
cls, rfile: BootResourceFile, content: io.IOBase, chunk_size: int,
progress_callback=None):
"""Upload the `content` to `rfile` in chunks using `chunk_size`."""
content.seek(0, io.SEEK_SET)
upload_uri = urlparse(
cls._handler.uri)._replace(path=rfile._data['upload_uri']).geturl()
uploaded_size = 0
insecure = cls._handler.session.insecure
connector = aiohttp.TCPConnector(verify_ssl=(not insecure))
session = aiohttp.ClientSession(connector=connector)
async with session:
while True:
buf = content.read(chunk_size)
length = len(buf)
if length > 0:
uploaded_size += length
await cls._put_chunk(session, upload_uri, buf)
if progress_callback is not None:
progress_callback(uploaded_size / rfile.size)
if length != chunk_size:
break | Upload the `content` to `rfile` in chunks using `chunk_size`. |
def predicate_type(self, pred: URIRef) -> URIRef:
"""
Return the type of pred
:param pred: predicate to map
:return:
"""
return self._o.value(pred, RDFS.range) | Return the type of pred
:param pred: predicate to map
:return: |
def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
"""
This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Examples
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other sizes can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=101, transform=ccrs.Geodetic())
if isinstance(color, str):
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color)
else:
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color[n])
if legend == 'yes':
plt.legend(loc=2) | This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Examples
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other sizes can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. |
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
req = self._session.get(self._api_prefix + url, params=params)
return self._action(req) | Wrapper around request.get() to use the API prefix. Returns a JSON response. |
def _get_argv(index, default=None):
''' get the argv input argument defined by index. Return the default
attribute if that argument does not exist
'''
return _sys.argv[index] if len(_sys.argv) > index else default | get the argv input argument defined by index. Return the default
attribute if that argument does not exist |
def load_plume_package(package, plume_dir, accept_defaults):
"""Loads a canari package into Plume."""
from canari.commands.load_plume_package import load_plume_package
load_plume_package(package, plume_dir, accept_defaults) | Loads a canari package into Plume. |
def is_attacked_by(self, color: Color, square: Square) -> bool:
"""
Checks if the given side attacks the given square.
Pinned pieces still count as attackers. Pawns that can be captured
en passant are **not** considered attacked.
"""
return bool(self.attackers_mask(color, square)) | Checks if the given side attacks the given square.
Pinned pieces still count as attackers. Pawns that can be captured
en passant are **not** considered attacked. |
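A short illustration in the style of python-chess, where this method lives, using the standard starting position:

import chess

board = chess.Board()
# e3 is covered by the white d2 and f2 pawns in the initial position
assert board.is_attacked_by(chess.WHITE, chess.E3)
assert not board.is_attacked_by(chess.BLACK, chess.E3)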
def svm_train(arg1, arg2=None, arg3=None):
"""
svm_train(y, x [, options]) -> model | ACC | MSE
y: a list/tuple/ndarray of l true labels (type must be int/double).
x: 1. a list/tuple of l training instances. Feature vector of
each training instance is a list/tuple or dictionary.
2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
svm_train(prob [, options]) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC| MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, scipy.ndarray)):
assert isinstance(arg2, (list, tuple)) or (scipy and isinstance(arg2, (scipy.ndarray, sparse.spmatrix)))
y, x, options = arg1, arg2, arg3
param = svm_parameter(options)
prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
elif isinstance(arg1, svm_problem):
prob = arg1
if isinstance(arg2, svm_parameter):
param = arg2
else:
param = svm_parameter(arg2)
if prob is None or param is None:
raise TypeError("Wrong types for the arguments")
if param.kernel_type == PRECOMPUTED:
for i in range(prob.l):
xi = prob.x[i]
idx, val = xi[0].index, xi[0].value
if idx != 0:
raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
if val <= 0 or val > prob.n:
raise ValueError('Wrong input format: sample_serial_number out of range')
if param.gamma == 0 and prob.n > 0:
param.gamma = 1.0 / prob.n
libsvm.svm_set_print_string_function(param.print_func)
err_msg = libsvm.svm_check_parameter(prob, param)
if err_msg:
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
libsvm.svm_cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
if param.svm_type in [EPSILON_SVR, NU_SVR]:
print("Cross Validation Mean squared error = %g" % MSE)
print("Cross Validation Squared correlation coefficient = %g" % SCC)
return MSE
else:
print("Cross Validation Accuracy = %g%%" % ACC)
return ACC
else:
m = libsvm.svm_train(prob, param)
m = toPyModel(m)
# If prob is destroyed, data including SVs pointed by m can remain.
m.x_space = prob.x_space
return m | svm_train(y, x [, options]) -> model | ACC | MSE
y: a list/tuple/ndarray of l true labels (type must be int/double).
x: 1. a list/tuple of l training instances. Feature vector of
each training instance is a list/tuple or dictionary.
2. an l * n numpy ndarray or scipy spmatrix (n: number of features).
svm_train(prob [, options]) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC| MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs) |
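A small sketch of the list-based calling convention described above, with two toy samples in sparse dictionary form (svm_predict is assumed to come from the same libsvm Python module):

y = [1, -1]
x = [{1: 1.0, 2: 1.0}, {1: -1.0, 2: -1.0}]
m = svm_train(y, x, '-s 0 -t 0 -c 4 -q')         # linear C-SVC, quiet mode
p_labels, p_acc, p_vals = svm_predict(y, x, m)   # re-predict on the training data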
def places_photo(client, photo_reference, max_width=None, max_height=None):
"""
Downloads a photo from the Places API.
:param photo_reference: A string identifier that uniquely identifies a
photo, as provided by either a Places search or Places detail request.
:type photo_reference: string
:param max_width: Specifies the maximum desired width, in pixels.
:type max_width: int
:param max_height: Specifies the maximum desired height, in pixels.
:type max_height: int
:rtype: iterator containing the raw image data, which typically can be
used to save an image file locally. For example:
```
f = open(local_filename, 'wb')
for chunk in client.places_photo(photo_reference, max_width=100):
if chunk:
f.write(chunk)
f.close()
```
"""
if not (max_width or max_height):
raise ValueError("a max_width or max_height arg is required")
params = {"photoreference": photo_reference}
if max_width:
params["maxwidth"] = max_width
if max_height:
params["maxheight"] = max_height
# "extract_body" and "stream" args here are used to return an iterable
# response containing the image file data, rather than converting from
# json.
response = client._request("/maps/api/place/photo", params,
extract_body=lambda response: response,
requests_kwargs={"stream": True})
return response.iter_content() | Downloads a photo from the Places API.
:param photo_reference: A string identifier that uniquely identifies a
photo, as provided by either a Places search or Places detail request.
:type photo_reference: string
:param max_width: Specifies the maximum desired width, in pixels.
:type max_width: int
:param max_height: Specifies the maximum desired height, in pixels.
:type max_height: int
:rtype: iterator containing the raw image data, which typically can be
used to save an image file locally. For example:
```
f = open(local_filename, 'wb')
for chunk in client.places_photo(photo_reference, max_width=100):
if chunk:
f.write(chunk)
f.close()
``` |
def fetch(**kwargs):
'''
.. versionadded:: 2016.3.4
freebsd-update fetch wrapper. Based on the currently installed world and the
configuration options set, fetch all available binary updates.
kwargs:
Parameters of freebsd-update command.
'''
# fetch continues when no controlling terminal is present
pre = ''
post = ''
run_args = {}
if float(__grains__['osrelease']) >= 10.2:
post += '--not-running-from-cron'
else:
pre += ' env PAGER=cat'
run_args['python_shell'] = True
return _wrapper('fetch', pre=pre, post=post, run_args=run_args, **kwargs) | .. versionadded:: 2016.3.4
freebsd-update fetch wrapper. Based on the currently installed world and the
configuration options set, fetch all available binary updates.
kwargs:
Parameters of freebsd-update command. |
def quad_info(name, quad, pretty):
'''Get information for a specific mosaic quad'''
cl = clientv1()
mosaic, = cl.get_mosaic_by_name(name).items_iter(1)
echo_json_response(call_and_wrap(cl.get_quad_by_id, mosaic, quad), pretty) | Get information for a specific mosaic quad |
def from_vertices_and_edges(vertices, edges, vertex_name_key='name', vertex_id_key='id',
edge_foreign_keys=('source', 'target'), directed=True):
"""
This representation assumes that vertices and edges are encoded in
two lists, each list containing a Python dict for each vertex and
each edge, respectively. A distinguished element of the vertex dicts
contains a vertex ID which is used in the edge dicts to refer to
source and target vertices. All the remaining elements of the dicts
are considered vertex and edge attributes.
@param vertices: a list of dicts for the vertices.
@param edges: a list of dicts for the edges.
@param vertex_name_key: the name of the distinguished key in the dicts
in the vertex data source that contains the vertex names. Will also be used
as vertex label.
@param vertex_id_key: the name of the distinguished key in the dicts
in the vertex data source that contains a unique identifier for the vertex.
@param edge_foreign_keys: the name of the attributes in the dicts in C{edges}
that contain the source and target vertex names.
@return: IGraph instance with integers for vertex ids, edge sources, and edge targets.
"""
vertex_data = _dicts_to_columns(vertices)
edge_data = _dicts_to_columns(edges)
n = len(vertices)
vertex_index = dict(zip(vertex_data[vertex_id_key], range(n)))
# Iterate over `edges` to create `edge_list`, where every list item is a pair of integers.
edge_list = list(map(lambda source, target: (vertex_index[source], vertex_index[target]),
edge_data[edge_foreign_keys[0]],
edge_data[edge_foreign_keys[1]]))
g = IGraph(n=n, edges=edge_list, directed=directed, vertex_attrs=vertex_data, edge_attrs=edge_data)
g.vs['name'] = g.vs[vertex_name_key]
g.vs['indegree'] = g.degree(mode="in")
g.vs['outdegree'] = g.degree(mode="out")
g.vs['label'] = g.vs[vertex_name_key]
if 'group' not in g.vs.attributes():
g.vs['group'] = labels_to_groups(g.vs['label'])
return g | This representation assumes that vertices and edges are encoded in
two lists, each list containing a Python dict for each vertex and
each edge, respectively. A distinguished element of the vertex dicts
contains a vertex ID which is used in the edge dicts to refer to
source and target vertices. All the remaining elements of the dicts
are considered vertex and edge attributes.
@param vertices: a list of dicts for the vertices.
@param edges: a list of dicts for the edges.
@param vertex_name_key: the name of the distinguished key in the dicts
in the vertex data source that contains the vertex names. Will also be used
as vertex label.
@param vertex_id_key: the name of the distinguished key in the dicts
in the vertex data source that contains a unique identifier for the vertex.
@param edge_foreign_keys: the name of the attributes in the dicts in C{edges}
that contain the source and target vertex names.
@return: IGraph instance with integers for vertex ids, edge sources, and edge targets. |
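A compact sketch of the expected input shape; keys other than the id, name and foreign-key fields simply become attributes:

vertices = [
    {'id': 'a', 'name': 'Alice', 'age': 31},
    {'id': 'b', 'name': 'Bob', 'age': 27},
]
edges = [{'source': 'a', 'target': 'b', 'weight': 2.0}]
g = from_vertices_and_edges(vertices, edges)
# g.vs['label'] == ['Alice', 'Bob'] and g.es['weight'] == [2.0]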
def render(obj):
"""Convienently render strings with the fabric context"""
def get_v(v):
return v % env if isinstance(v, basestring) else v
if isinstance(obj, types.StringType):
return obj % env
elif isinstance(obj, types.TupleType) or isinstance(obj, types.ListType):
rv = []
for v in obj:
rv.append(get_v(v))
elif isinstance(obj, types.DictType):
rv = {}
for k, v in obj.items():
rv[k] = get_v(v)
return rv | Conveniently render strings with the fabric context
def get_relevant_policy_section(self, policy_name, group=None):
"""
Look up the policy corresponding to the provided policy name and
group (optional). Log any issues found during the look up.
"""
policy_bundle = self._operation_policies.get(policy_name)
if not policy_bundle:
self._logger.warning(
"The '{}' policy does not exist.".format(policy_name)
)
return None
if group:
groups_policy_bundle = policy_bundle.get('groups')
if not groups_policy_bundle:
self._logger.debug(
"The '{}' policy does not support groups.".format(
policy_name
)
)
return None
else:
group_policy = groups_policy_bundle.get(group)
if not group_policy:
self._logger.debug(
"The '{}' policy does not support group '{}'.".format(
policy_name,
group
)
)
return None
else:
return group_policy
else:
return policy_bundle.get('preset') | Look up the policy corresponding to the provided policy name and
group (optional). Log any issues found during the look up. |
def validate(self, key, value):
"""Validation function run before setting. Uses function from __init__."""
if self._validator is not None:
self._validator(key, value) | Validation function run before setting. Uses function from __init__. |
def assert_satisfies(v, cond, message=None):
"""
Assert that variable satisfies the provided condition.
:param v: variable to check. Its value is only used for error reporting.
:param bool cond: condition that must be satisfied. Should be somehow related to the variable ``v``.
:param message: message string to use instead of the default.
"""
if not cond:
vname, vexpr = _retrieve_assert_arguments()
if not message:
message = "Argument `{var}` (= {val!r}) does not satisfy the condition {expr}" \
.format(var=vname, val=v, expr=vexpr)
raise H2OValueError(message=message, var_name=vname, skip_frames=1) | Assert that variable satisfies the provided condition.
:param v: variable to check. Its value is only used for error reporting.
:param bool cond: condition that must be satisfied. Should be somehow related to the variable ``v``.
:param message: message string to use instead of the default. |
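Typical use, assuming the helper is imported from the h2o utilities where it is defined:

learning_rate = -0.5
assert_satisfies(learning_rate, 0 < learning_rate <= 1)
# raises H2OValueError: Argument `learning_rate` (= -0.5) does not satisfy the condition 0 < learning_rate <= 1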
def close(self):
""" Close the connection.
"""
if not self._closed:
if self.protocol_version >= 3:
log_debug("[#%04X] C: GOODBYE", self.local_port)
self._append(b"\x02", ())
try:
self.send()
except ServiceUnavailable:
pass
log_debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except IOError:
pass
finally:
self._closed = True | Close the connection. |
def invert_projection(self, X, identities):
"""
Calculate the inverted projection.
The inverted projection of a SOM is created by associating each weight
with the input which matches it the most, thus giving a good
approximation of the "influence" of each input item.
Works best for symbolic (instead of continuous) input data.
Parameters
----------
X : numpy array
Input data
identities : list
A list of names for each of the input data. Must be the same
length as X.
Returns
-------
m : numpy array
An array with the same shape as the map
"""
distances = self.transform(X)
if len(distances) != len(identities):
raise ValueError("X and identities are not the same length: "
"{0} and {1}".format(len(X), len(identities)))
node_match = []
for d in distances.__getattribute__(self.argfunc)(0):
node_match.append(identities[d])
return np.array(node_match) | Calculate the inverted projection.
The inverted projection of a SOM is created by associating each weight
with the input which matches it the most, thus giving a good
approximation of the "influence" of each input item.
Works best for symbolic (instead of continuous) input data.
Parameters
----------
X : numpy array
Input data
identities : list
A list of names for each of the input data. Must be the same
length as X.
Returns
-------
m : numpy array
An array with the same shape as the map |
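Sketch of intended use, assuming som is a trained map instance exposing this method and the inputs are symbolic one-hot vectors:

import numpy as np

words = ['cat', 'dog', 'fish']
X = np.eye(3)
inverted = som.invert_projection(X, words)
# `inverted` has the same shape as the map; each cell holds the word whose vector best matches that node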
def plotRatePSD(include=['allCells', 'eachPop'], timeRange=None, binSize=5, maxFreq=100, NFFT=256, noverlap=128, smooth=0, overlay=True,
ylim = None, popColors = {}, fontSize=12, figSize=(10,8), saveData=None, saveFig=None, showFig=True):
'''
Plot firing rate power spectral density (PSD)
- include (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of data series to include.
Note: one line per item, not grouped (default: ['allCells', 'eachPop'])
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- binSize (int): Size in ms of spike bins (default: 5)
- maxFreq (float): Maximum frequency to show in plot (default: 100)
- NFFT (float): The number of data points used in each block for the FFT (power of 2) (default: 256)
- smooth (int): Window size for smoothing; no smoothing if 0 (default: 0)
- overlay (True|False): Whether to overlay the data lines or plot in separate subplots (default: True)
- graphType ('line'|'bar'): Type of graph to use (line graph or bar plot) (default: 'line')
- yaxis ('rate'|'count'): Units of y axis (firing rate in Hz, or spike count) (default: 'rate')
- popColors (dict): Dictionary with color (value) used for each population (key) (default: None)
- figSize ((width, height)): Size of figure (default: (10,8))
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure;
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handle
'''
from .. import sim
print('Plotting firing rate power spectral density (PSD) ...')
# Replace 'eachPop' with list of pops
if 'eachPop' in include:
include.remove('eachPop')
for pop in sim.net.allPops: include.append(pop)
# time range
if timeRange is None:
timeRange = [0,sim.cfg.duration]
histData = []
# create fig
fig,ax1 = plt.subplots(figsize=figSize)
fontsiz = fontSize
# set font size
plt.rcParams.update({'font.size': fontSize})
allPower, allSignal, allFreqs = [], [], []
# Plot separate line for each entry in include
for iplot,subset in enumerate(include):
cells, cellGids, netStimLabels = getCellsInclude([subset])
numNetStims = 0
# Select cells to include
if len(cellGids) > 0:
try:
spkinds,spkts = list(zip(*[(spkgid,spkt) for spkgid,spkt in zip(sim.allSimData['spkid'],sim.allSimData['spkt']) if spkgid in cellGids]))
except:
spkinds,spkts = [],[]
else:
spkinds,spkts = [],[]
# Add NetStim spikes
spkts, spkinds = list(spkts), list(spkinds)
numNetStims = 0
if 'stims' in sim.allSimData:
for netStimLabel in netStimLabels:
netStimSpks = [spk for cell,stims in sim.allSimData['stims'].items() \
for stimLabel,stimSpks in stims.items() for spk in stimSpks if stimLabel == netStimLabel]
if len(netStimSpks) > 0:
lastInd = max(spkinds) if len(spkinds)>0 else 0
spktsNew = netStimSpks
spkindsNew = [lastInd+1+i for i in range(len(netStimSpks))]
spkts.extend(spktsNew)
spkinds.extend(spkindsNew)
numNetStims += 1
histo = np.histogram(spkts, bins = np.arange(timeRange[0], timeRange[1], binSize))
histoT = histo[1][:-1]+binSize/2
histoCount = histo[0]
histoCount = histoCount * (1000.0 / binSize) / (len(cellGids)+numNetStims) # convert to rates
histData.append(histoCount)
color = popColors[subset] if isinstance(subset, (str, tuple)) and subset in popColors else colorList[iplot%len(colorList)]
if not overlay:
plt.subplot(len(include),1,iplot+1) # if subplot, create new subplot
title (str(subset), fontsize=fontsiz)
color = 'blue'
Fs = 1000.0/binSize # ACTUALLY DEPENDS ON BIN WINDOW!!! RATE NOT SPIKE!
power = mlab.psd(histoCount, Fs=Fs, NFFT=NFFT, detrend=mlab.detrend_none, window=mlab.window_hanning,
noverlap=noverlap, pad_to=None, sides='default', scale_by_freq=None)
if smooth:
signal = _smooth1d(10*np.log10(power[0]), smooth)
else:
signal = 10*np.log10(power[0])
freqs = power[1]
allFreqs.append(freqs)
allPower.append(power)
allSignal.append(signal)
plt.plot(freqs[freqs<maxFreq], signal[freqs<maxFreq], linewidth=1.5, color=color)
plt.xlabel('Frequency (Hz)', fontsize=fontsiz)
plt.ylabel('Power Spectral Density (dB/Hz)', fontsize=fontsiz) # add yaxis in opposite side
plt.xlim([0, maxFreq])
if ylim: plt.ylim(ylim)
# if len(include) < 5: # if apply tight_layout with many subplots it inverts the y-axis
# try:
# plt.tight_layout()
# except:
# pass
# Add legend
if overlay:
for i,subset in enumerate(include):
color = popColors[subset] if isinstance(subset, basestring) and subset in popColors else colorList[i%len(colorList)]
plt.plot(0,0,color=color,label=str(subset))
plt.legend(fontsize=fontsiz, loc=1)#, bbox_to_anchor=(1.04, 1), loc=2, borderaxespad=0.)
maxLabelLen = min(10,max([len(str(l)) for l in include]))
#plt.subplots_adjust(right=(0.9-0.012*maxLabelLen))
# save figure data
if saveData:
figData = {'histData': histData, 'histT': histoT, 'include': include, 'timeRange': timeRange, 'binSize': binSize,
'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}
_saveFigData(figData, saveData, 'spikeHist')
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'spikePSD.png'
plt.savefig(filename)
# show fig
if showFig: _showFigure()
return fig, {'allSignal':allSignal, 'allPower':allPower, 'allFreqs':allFreqs} | Plot firing rate power spectral density (PSD)
- include (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): List of data series to include.
Note: one line per item, not grouped (default: ['allCells', 'eachPop'])
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- binSize (int): Size in ms of spike bins (default: 5)
- maxFreq (float): Maximum frequency to show in plot (default: 100)
- NFFT (float): The number of data points used in each block for the FFT (power of 2) (default: 256)
- smooth (int): Window size for smoothing; no smoothing if 0 (default: 0)
- overlay (True|False): Whether to overlay the data lines or plot in separate subplots (default: True)
- graphType ('line'|'bar'): Type of graph to use (line graph or bar plot) (default: 'line')
- yaxis ('rate'|'count'): Units of y axis (firing rate in Hz, or spike count) (default: 'rate')
- popColors (dict): Dictionary with color (value) used for each population (key) (default: None)
- figSize ((width, height)): Size of figure (default: (10,8))
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure;
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handle |
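An illustrative call using only the parameters documented above, assuming a completed netpyne simulation is available:

fig, psd_data = plotRatePSD(include=['allCells', 'eachPop'], binSize=4,
                            maxFreq=60, overlay=True, saveFig='ratePSD.png')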
def convert_la_to_rgba(self, row, result):
"""Convert a grayscale image with alpha to RGBA."""
for i in range(len(row) // 2):  # one LA pixel (two samples) per RGBA pixel
for j in range(3):
result[(4 * i) + j] = row[2 * i]
result[(4 * i) + 3] = row[(2 * i) + 1] | Convert a grayscale image with alpha to RGBA. |
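A quick check of the conversion (note the per-pixel loop bound of len(row) // 2 above), assuming writer is the object defining the method and result is pre-allocated at twice the LA row length:

row = bytearray([10, 255, 20, 128])      # two LA pixels
result = bytearray(8)
writer.convert_la_to_rgba(row, result)
assert list(result) == [10, 10, 10, 255, 20, 20, 20, 128]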
def PostRegistration(method):
# pylint: disable=C0103
"""
The service post-registration callback decorator is called after a service
of the component has been registered to the framework.
The decorated method must accept the
:class:`~pelix.framework.ServiceReference` of the registered
service as argument::
@PostRegistration
def callback_method(self, service_reference):
'''
service_reference: The ServiceReference of the provided service
'''
# ...
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function
"""
if not isinstance(method, types.FunctionType):
raise TypeError("@PostRegistration can only be applied on functions")
# Tests the number of parameters
validate_method_arity(method, "service_reference")
_append_object_entry(
method,
constants.IPOPO_METHOD_CALLBACKS,
constants.IPOPO_CALLBACK_POST_REGISTRATION,
)
return method | The service post-registration callback decorator is called after a service
of the component has been registered to the framework.
The decorated method must accept the
:class:`~pelix.framework.ServiceReference` of the registered
service as argument::
@PostRegistration
def callback_method(self, service_reference):
'''
service_reference: The ServiceReference of the provided service
'''
# ...
:param method: The decorated method
:raise TypeError: The decorated element is not a valid function |
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
paragraph = m.Paragraph(
m.Image(
'file:///%s/img/screenshots/'
'osm-downloader-screenshot.png' % resources_path()),
style_class='text-center'
)
message.add(paragraph)
body = tr(
'This tool will fetch building (\'structure\') or road ('
'\'highway\') data from the OpenStreetMap project for you. '
'The downloaded data will have InaSAFE keywords defined and a '
'default QGIS style applied. To use this tool effectively:'
)
tips = m.BulletedList()
tips.add(tr(
'Your current extent, when opening this window, will be used to '
'determine the area for which you want data to be retrieved. '
'You can interactively select the area by using the '
'\'select on map\' button - which will temporarily hide this '
'window and allow you to drag a rectangle on the map. After you '
'have finished dragging the rectangle, this window will '
'reappear.'))
tips.add(tr(
'Check the output directory is correct. Note that the saved '
'dataset will be named after the type of data being downloaded '
'e.g. roads.shp or buildings.shp (and associated files).'
))
tips.add(tr(
'By default simple file names will be used (e.g. roads.shp, '
'buildings.shp). If you wish you can specify a prefix to '
'add in front of this default name. For example using a prefix '
'of \'padang-\' will cause the downloaded files to be saved as '
'\'padang-roads.shp\' and \'padang-buildings.shp\'. Note that '
'the only allowed prefix characters are A-Z, a-z, 0-9 and the '
'characters \'-\' and \'_\'. You can leave this blank if you '
'prefer.'
))
tips.add(tr(
'If a dataset already exists in the output directory it will be '
'overwritten.'
))
tips.add(tr(
'This tool requires a working internet connection and fetching '
'buildings or roads will consume your bandwidth.'))
tips.add(m.Link(
'http://www.openstreetmap.org/copyright',
text=tr(
'Downloaded data is copyright OpenStreetMap contributors '
'(click for more info).')
))
message.add(m.Paragraph(body))
message.add(tips)
message.add(m.Paragraph(
# format 'When the __Political boundaries__' for proper i18n
tr('When the %s '
'box in the Feature types menu is ticked, the Political boundary '
'options panel will be enabled. The panel lets you select which '
'admin level you wish to download. The admin levels are country '
'specific. When you select an admin level, the local name for '
'that admin level will be shown. You can change which country '
'is used for the admin level description using the country drop '
'down menu. The country will be automatically set to coincide '
'with the view extent if a matching country can be found.') %
(
m.ImportantText(tr('Political boundaries')).to_html(),
)))
message.add(m.Paragraph(
m.ImportantText(tr('Note: ')),
tr(
'We have only provide presets for a subset of the available '
'countries. If you want to know what the levels are for your '
'country, please check on the following web page: '),
m.Link(
'http://wiki.openstreetmap.org/wiki/Tag:boundary%3Dadministrative',
text=tr(
'List of OSM Admin Boundary definitions '))))
return message | Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message |
def check_dupl_sources(self): # used in print_csm_info
"""
Extracts duplicated sources, i.e. sources with the same source_id in
different source groups. Raise an exception if there are sources with
the same ID which are not duplicated.
:returns: a list of list of sources, ordered by source_id
"""
dd = collections.defaultdict(list)
for src_group in self.src_groups:
for src in src_group:
try:
srcid = src.source_id
except AttributeError: # src is a Node object
srcid = src['id']
dd[srcid].append(src)
dupl = []
for srcid, srcs in sorted(dd.items()):
if len(srcs) > 1:
_assert_equal_sources(srcs)
dupl.append(srcs)
return dupl | Extracts duplicated sources, i.e. sources with the same source_id in
different source groups. Raise an exception if there are sources with
the same ID which are not duplicated.
:returns: a list of list of sources, ordered by source_id |
def get_data(self):
"Get SNMP values from host"
alarm_oids = [netsnmp.Varbind(alarms[alarm_id]['oid']) for alarm_id in self.models[self.modem_type]['alarms']]
metric_oids = [netsnmp.Varbind(metrics[metric_id]['oid']) for metric_id in self.models[self.modem_type]['metrics']]
response = self.snmp_session.get(netsnmp.VarList(*alarm_oids + metric_oids))
return (
response[0:len(alarm_oids)],
response[len(alarm_oids):]
) | Get SNMP values from host |
def start(self):
"""
Start downloading, handling auto retry, download resume and path
moving
"""
if not self.auto_retry:
self.curl()
return
while not self.is_finished:
try:
self.curl()
except pycurl.error as e:
# transfer closed with n bytes remaining to read
if e.args[0] == pycurl.E_PARTIAL_FILE:
pass
# HTTP server doesn't seem to support byte ranges.
# Cannot resume.
elif e.args[0] == pycurl.E_HTTP_RANGE_ERROR:
break
# Recv failure: Connection reset by peer
elif e.args[0] == pycurl.E_RECV_ERROR:
if self._rst_retries < self.max_rst_retries:
pass
else:
raise e
self._rst_retries += 1
else:
raise e
self._move_path()
self._done() | Start downloading, handling auto retry, download resume and path
moving |
def remove_target(self, target_name: str):
"""Remove (unregister) a `target` from this build context.
Removes the target instance with the given name, if it exists,
from both the `targets` map and the `targets_by_module` map.
Doesn't do anything if no target with that name is found.
Doesn't touch the target graph, if it exists.
"""
if target_name in self.targets:
del self.targets[target_name]
build_module = split_build_module(target_name)
if build_module in self.targets_by_module:
self.targets_by_module[build_module].remove(target_name) | Remove (unregister) a `target` from this build context.
Removes the target instance with the given name, if it exists,
from both the `targets` map and the `targets_by_module` map.
Doesn't do anything if no target with that name is found.
Doesn't touch the target graph, if it exists. |
def subn_filter(s, find, replace, count=0):
"""A non-optimal implementation of a regex filter"""
return re.sub(find, replace, s, count) | A non-optimal implementation of a regex filter
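With the call corrected to the standard-library re.sub signature, usage looks like:

subn_filter('hello world', 'o', '0')     # -> 'hell0 w0rld'
subn_filter('hello world', 'o', '0', 1)  # -> 'hell0 world' (only the first match)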
def remove_last(ol,value,**kwargs):
'''
from elist.elist import *
ol = [1,'a',3,'a',5,'a']
id(ol)
new = remove_last(ol,'a')
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'a',5,'a']
id(ol)
rslt = remove_last(ol,'a',mode="original")
ol
rslt
id(ol)
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
new = copy.deepcopy(ol)
new.reverse()
new.remove(value)
new.reverse()
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) | from elist.elist import *
ol = [1,'a',3,'a',5,'a']
id(ol)
new = remove_last(ol,'a')
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'a',5,'a']
id(ol)
rslt = remove_last(ol,'a',mode="original")
ol
rslt
id(ol)
id(rslt) |
def _adjust_auto(self, real_wave_mfcc, algo_parameters):
"""
AUTO (do not modify)
"""
self.log(u"Called _adjust_auto")
self.log(u"Nothing to do, return unchanged") | AUTO (do not modify) |
def start(self):
"""Creates a TCP connection to Device Cloud and sends a ConnectionRequest message"""
self.log.info("Starting Insecure Session for Monitor %s" % self.monitor_id)
if self.socket is not None:
raise Exception("Socket already established for %s." % self)
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.client.hostname, PUSH_OPEN_PORT))
self.socket.setblocking(0)
except socket.error as exception:
self.socket.close()
self.socket = None
raise
self.send_connection_request() | Creates a TCP connection to Device Cloud and sends a ConnectionRequest message |
def after_unassign(duplicate_analysis):
"""Removes the duplicate from the system
"""
analysis_events.after_unassign(duplicate_analysis)
parent = duplicate_analysis.aq_parent
logger.info("Removing duplicate '{}' from '{}'"
.format(duplicate_analysis.getId(), parent.getId()))
parent.manage_delObjects([duplicate_analysis.getId()]) | Removes the duplicate from the system |
def create_oracle(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to an Oracle database using cx_oracle.
"""
return create_engine(
_create_oracle(username, password, host, port, database),
**kwargs
) | create an engine connected to an Oracle database using cx_oracle.
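Illustrative usage; the connection details are placeholders:

engine = create_oracle('scott', 'tiger', 'db.example.com', 1521, 'orcl', echo=True)
# any extra keyword arguments (echo, pool_size, ...) are passed straight through to create_engine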
def payload_class_for_element_name(element_name):
"""Return a payload class for given element name."""
logger.debug(" looking up payload class for element: {0!r}".format(
element_name))
logger.debug(" known: {0!r}".format(STANZA_PAYLOAD_CLASSES))
if element_name in STANZA_PAYLOAD_CLASSES:
return STANZA_PAYLOAD_CLASSES[element_name]
else:
return XMLPayload | Return a payload class for given element name. |
def _set_gbc(self, v, load=False):
"""
Setter method for gbc, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/gbc (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_gbc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gbc() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=gbc.gbc, is_container='container', presence=False, yang_name="gbc", rest_name="gbc", parent=self, choice=(u'interface-identifier', u'gbic'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """gbc must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=gbc.gbc, is_container='container', presence=False, yang_name="gbc", rest_name="gbc", parent=self, choice=(u'interface-identifier', u'gbic'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""",
})
self.__gbc = t
if hasattr(self, '_set'):
self._set() | Setter method for gbc, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/gbc (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_gbc is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gbc() directly. |
def get_workflow(self):
"""Returns the instantiated workflow class."""
extra_context = self.get_initial()
entry_point = self.request.GET.get("step", None)
workflow = self.workflow_class(self.request,
context_seed=extra_context,
entry_point=entry_point)
return workflow | Returns the instantiated workflow class. |
def preferred_width(self, cli, max_available_width):
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
if cli.current_buffer.complete_state:
state = cli.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta) for c in state.current_completions)
else:
return 0 | Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.) |
def _get_adjusted_merge_area(self, attrs, insertion_point, no_to_insert,
axis):
"""Returns updated merge area
Parameters
----------
attrs: Dict
\tCell attribute dictionary that shall be adjusted
insertion_point: Integer
\tPoint on axis, before which insertion takes place
no_to_insert: Integer >= 0
\tNumber of rows/cols/tabs that shall be inserted
axis: Integer in range(2)
\tSpecifies number of dimension, i.e. 0 == row, 1 == col
"""
assert axis in range(2)
if "merge_area" not in attrs or attrs["merge_area"] is None:
return
top, left, bottom, right = attrs["merge_area"]
selection = Selection([(top, left)], [(bottom, right)], [], [], [])
selection.insert(insertion_point, no_to_insert, axis)
__top, __left = selection.block_tl[0]
__bottom, __right = selection.block_br[0]
# Adjust merge area if it is beyond the grid shape
rows, cols, tabs = self.shape
if __top < 0 and __bottom < 0 or __top >= rows and __bottom >= rows or\
__left < 0 and __right < 0 or __left >= cols and __right >= cols:
return
if __top < 0:
__top = 0
if __top >= rows:
__top = rows - 1
if __bottom < 0:
__bottom = 0
if __bottom >= rows:
__bottom = rows - 1
if __left < 0:
__left = 0
if __left >= cols:
__left = cols - 1
if __right < 0:
__right = 0
if __right >= cols:
__right = cols - 1
return __top, __left, __bottom, __right | Returns updated merge area
Parameters
----------
attrs: Dict
\tCell attribute dictionary that shall be adjusted
insertion_point: Integer
\tPont on axis, before which insertion takes place
no_to_insert: Integer >= 0
\tNumber of rows/cols/tabs that shall be inserted
axis: Integer in range(2)
\tSpecifies number of dimension, i.e. 0 == row, 1 == col |
def get_variants(data, include_germline=False):
"""Retrieve set of variant calls to use for heterogeneity analysis.
"""
data = utils.deepish_copy(data)
supported = ["precalled", "vardict", "vardict-java", "vardict-perl",
"freebayes", "octopus", "strelka2"]
# Right now mutect2 and mutect do not provide heterozygous germline calls
# to be useful https://github.com/bcbio/bcbio-nextgen/issues/2464
# supported += ["mutect2", "mutect"]
if include_germline:
supported.insert(1, "gatk-haplotype")
out = []
# CWL based input
if isinstance(data.get("variants"), dict) and "samples" in data["variants"]:
cur_vs = []
# Unpack single sample list of files
if (isinstance(data["variants"]["samples"], (list, tuple)) and
len(data["variants"]["samples"]) == 1 and isinstance(data["variants"]["samples"][0], (list, tuple))):
data["variants"]["samples"] = data["variants"]["samples"][0]
for fname in data["variants"]["samples"]:
variantcaller = utils.splitext_plus(os.path.basename(fname))[0]
variantcaller = variantcaller.replace(dd.get_sample_name(data) + "-", "")
for batch in dd.get_batches(data):
variantcaller = variantcaller.replace(batch + "-", "")
cur_vs.append({"vrn_file": fname, "variantcaller": variantcaller})
data["variants"] = cur_vs
for v in data.get("variants", []):
if v["variantcaller"] in supported and v.get("vrn_file"):
out.append((supported.index(v["variantcaller"]), v))
out.sort()
return [xs[1] for xs in out] | Retrieve set of variant calls to use for heterogeneity analysis. |
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i | Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3 |
def create(self):
"""Create tracking collection.
Does nothing if tracking collection already exists.
"""
if self._track is None:
self._track = self.db[self.tracking_collection_name] | Create tracking collection.
Does nothing if tracking collection already exists. |
def from_file(filepath, delimiter='', blanklines=False):
"""Imports userdata from a file.
:type filepath: string
:param filepath The absolute path to the file.
:type delimiter: string
:param: delimiter Delimiter to use with the troposphere.Join().
:type blanklines: boolean
:param blanklines If blank lines should be ignored
rtype: troposphere.Base64
:return The base64 representation of the file.
"""
data = []
try:
with open(filepath, 'r') as f:
for line in f:
if blanklines and line.strip('\n\r ') == '':
continue
data.append(line)
except IOError:
raise IOError('Error opening or reading file: {}'.format(filepath))
return Base64(Join(delimiter, data)) | Imports userdata from a file.
:type filepath: string
:param filepath The absolute path to the file.
:type delimiter: string
:param: delimiter Delimiter to use with the troposphere.Join().
:type blanklines: boolean
:param blanklines If blank lines should be ignored
rtype: troposphere.Base64
:return The base64 representation of the file. |
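A small sketch wiring the helper into a troposphere EC2 instance; the file path, AMI and resource name are illustrative:

from troposphere import ec2

instance = ec2.Instance(
    'WebServer',
    ImageId='ami-12345678',
    InstanceType='t2.micro',
    UserData=from_file('scripts/bootstrap.sh', blanklines=True),
)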
def map_(input_layer, fn):
"""Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence.
"""
if not input_layer.is_sequence():
raise ValueError('Can only map a sequence.')
return [fn(x) for x in input_layer] | Maps the given function across this sequence.
To map an entire template across the sequence, use the `as_fn` method on the
template.
Args:
input_layer: The input tensor.
fn: A function of 1 argument that is applied to each item in the sequence.
Returns:
A new sequence Pretty Tensor.
Raises:
ValueError: If the input_layer does not hold a sequence. |
async def send_from_directory(
directory: FilePath,
file_name: str,
*,
mimetype: Optional[str]=None,
as_attachment: bool=False,
attachment_filename: Optional[str]=None,
add_etags: bool=True,
cache_timeout: Optional[int]=None,
conditional: bool=True,
last_modified: Optional[datetime]=None,
) -> Response:
"""Send a file from a given directory.
Arguments:
directory: Directory that when combined with file_name gives
the file path.
file_name: File name that when combined with directory gives
the file path.
See :func:`send_file` for the other arguments.
"""
file_path = safe_join(directory, file_name)
if not file_path.is_file():
raise NotFound()
return await send_file(
file_path,
mimetype=mimetype,
as_attachment=as_attachment,
attachment_filename=attachment_filename,
add_etags=add_etags,
cache_timeout=cache_timeout,
conditional=conditional,
last_modified=last_modified,
) | Send a file from a given directory.
Arguments:
directory: Directory that when combined with file_name gives
the file path.
file_name: File name that when combined with directory gives
the file path.
See :func:`send_file` for the other arguments. |
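Typical use inside a Quart route; the directory and URL rule are illustrative:

from quart import Quart

app = Quart(__name__)

@app.route('/reports/<name>')
async def report(name):
    return await send_from_directory('reports', name, as_attachment=True)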
def build_global(self, global_node):
"""parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
"""
config_block_lines = self.__build_config_block(
global_node.config_block)
return config.Global(config_block=config_block_lines) | parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object |
def get_first_node(
node,
node_not_to_step_past
):
"""
This is a super hacky way of getting the first node after a statement.
We do this because we visit a statement and keep on visiting and get something in return that is rarely the first node.
So we loop and loop backwards until we hit the statement or there is nothing to step back to.
"""
ingoing = None
i = 0
current_node = node
while current_node.ingoing:
# This is used because there may be multiple ingoing and loop will cause an infinite loop if we did [0]
i = random.randrange(len(current_node.ingoing))
# e.g. We don't want to step past the Except of an Except basic block
if current_node.ingoing[i] == node_not_to_step_past:
break
ingoing = current_node.ingoing
current_node = current_node.ingoing[i]
if ingoing:
return ingoing[i]
return current_node | This is a super hacky way of getting the first node after a statement.
We do this because we visit a statement and keep on visiting and get something in return that is rarely the first node.
So we loop and loop backwards until we hit the statement or there is nothing to step back to. |
def get_reviews(self, user_id):
""" Get reviews for a particular user
"""
url = _REVIEWS_USER.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
user_id=user_id,
at=self.access_token)
return _get_request(url) | Get reviews for a particular user |
def conn_aws(cred, crid):
"""Establish connection to AWS service."""
driver = get_driver(Provider.EC2)
try:
aws_obj = driver(cred['aws_access_key_id'],
cred['aws_secret_access_key'],
region=cred['aws_default_region'])
except SSLError as e:
abort_err("\r SSL Error with AWS: {}".format(e))
except InvalidCredsError as e:
abort_err("\r Error with AWS Credentials: {}".format(e))
return {crid: aws_obj} | Establish connection to AWS service. |
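A hedged usage sketch for conn_aws; the credentials are placeholders (never hard-code real keys) and the crid label is arbitrary:
cred = {
    'aws_access_key_id': 'AKIAxxxxxxxxxxxxxxxx',
    'aws_secret_access_key': 'placeholder-secret',
    'aws_default_region': 'us-east-1',
}
conns = conn_aws(cred, crid='aws-prod')  # {'aws-prod': <libcloud EC2 driver>} on success
nodes = conns['aws-prod'].list_nodes()   # the returned object is a normal libcloud driver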
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
# workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata('installed-files.txt').splitlines():
path = os.path.normpath(os.path.join(egg_info_path, installed_file))
paths_to_remove.add(path)
#FIXME: need a test for this elif block
#occurs with --single-version-externally-managed/--record outside of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
fh = open(develop_egg_link, 'r')
link_pointer = os.path.normcase(fh.readline().strip())
fh.close()
assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
paths_to_remove.add(os.path.join(bin_py, script))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = ConfigParser.SafeConfigParser()
config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
paths_to_remove.add(os.path.join(bin_py, name))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove | Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages. |
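A hedged sketch of how this uninstall method was driven in the legacy pip internals it comes from; InstallRequirement is a private class and modern pip has replaced this API, so treat it as illustrative only:
req = InstallRequirement.from_line('example-package')
if req.check_if_exists():
    req.uninstall(auto_confirm=True)  # collects the paths described above and removes them without prompting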
def inbox(request, template_name='django_messages/inbox.html'):
"""
Displays a list of received messages for the current user.
Optional Arguments:
``template_name``: name of the template to use.
"""
message_list = Message.objects.inbox_for(request.user)
return render(request, template_name, {
'message_list': message_list,
}) | Displays a list of received messages for the current user.
Optional Arguments:
``template_name``: name of the template to use. |
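A hedged sketch wiring the inbox view into a Django URLconf; the URL pattern and name are illustrative, and django_messages typically ships its own urls module that can be included instead:
from django.urls import path
from django_messages.views import inbox

urlpatterns = [
    path('messages/inbox/', inbox, {'template_name': 'django_messages/inbox.html'}, name='messages_inbox'),
]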