def load (self, jamfile_location):
assert isinstance(jamfile_location, basestring)
absolute = os.path.join(os.getcwd(), jamfile_location)
absolute = os.path.normpath(absolute)
jamfile_location = b2.util.path.relpath(os.getcwd(), absolute)
mname = self.module_name(jamfile_location)
if not mname in self.jamfile_modules:
if "--debug-loading" in self.manager.argv():
print "Loading Jamfile at '%s'" % jamfile_location
self.load_jamfile(jamfile_location, mname)
self.load_used_projects(mname)
return mname
|
Loads the Jamfile at the given location. After loading, the project global
file and any Jamfiles needed by the loaded one will be loaded recursively.
If the jamfile at that location is loaded already, does nothing.
Returns the project module for the Jamfile.
|
def _create_save_scenario_action(self):
icon = resources_path('img', 'icons', 'save-as-scenario.svg')
self.action_save_scenario = QAction(
QIcon(icon),
self.tr('Save Current Scenario'), self.iface.mainWindow())
message = self.tr('Save current scenario to text file')
self.action_save_scenario.setStatusTip(message)
self.action_save_scenario.setWhatsThis(message)
self.action_save_scenario.triggered.connect(self.save_scenario)
self.add_action(
self.action_save_scenario, add_to_toolbar=self.full_toolbar)
|
Create action for save scenario dialog.
|
def makePalette(color1, color2, N, hsv=True):
if hsv:
color1 = rgb2hsv(color1)
color2 = rgb2hsv(color2)
c1 = np.array(getColor(color1))
c2 = np.array(getColor(color2))
cols = []
for f in np.linspace(0, 1, N, endpoint=True):  # N samples, so that N colors are returned as documented
c = c1 * (1 - f) + c2 * f
if hsv:
c = np.array(hsv2rgb(c))
cols.append(c)
return cols
|
Generate N colors from `color1` to `color2`
by linear interpolation in HSV or RGB space.
:param int N: number of output colors.
:param color1: first rgb color.
:param color2: second rgb color.
:param bool hsv: if `False`, interpolation is calculated in RGB space.
.. hint:: Example: |colorpalette.py|_
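A minimal, self-contained sketch of the same interpolation using only the standard library (``colorsys`` stands in for the library's ``rgb2hsv``/``hsv2rgb`` helpers, and colors are plain ``(r, g, b)`` tuples in ``[0, 1]``)::

    import colorsys

    def make_palette(color1, color2, n, hsv=True):
        c1, c2 = color1, color2
        if hsv:
            c1 = colorsys.rgb_to_hsv(*c1)
            c2 = colorsys.rgb_to_hsv(*c2)
        cols = []
        for i in range(n):
            f = i / (n - 1) if n > 1 else 0.0
            c = tuple((1 - f) * a + f * b for a, b in zip(c1, c2))
            if hsv:
                c = colorsys.hsv_to_rgb(*c)
            cols.append(c)
        return cols

    # make_palette((1, 0, 0), (0, 0, 1), 5) -> 5 colors fading from red to blue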
|
def split_data(X, y, ratio=(0.8, 0.1, 0.1)):
assert(sum(ratio) == 1 and len(ratio) == 3)
X_train, X_rest, y_train, y_rest = train_test_split(
X, y, train_size=ratio[0])
# only ratio[1] + ratio[2] of the data remains after the first split,
# so renormalize the validation share before splitting the remainder
X_val, X_test, y_val, y_test = train_test_split(
X_rest, y_rest, train_size=ratio[1] / (ratio[1] + ratio[2]))
return X_train, X_val, X_test, y_train, y_val, y_test
|
Splits data into a training, validation, and test set.
Args:
X: text data
y: data labels
ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1)
Returns:
split data: X_train, X_val, X_test, y_train, y_val, y_test
|
def _HasOOOWrite(self, path):
size = tf.io.gfile.stat(path).length
old_size = self._finalized_sizes.get(path, None)
if size != old_size:
if old_size is None:
logger.error('File %s created after file %s even though it\'s '
'lexicographically earlier', path, self._path)
else:
logger.error('File %s updated even though the current file is %s',
path, self._path)
return True
else:
return False
|
Returns whether the path has had an out-of-order write.
|
def add_title_translation(self, title, language, source=None):
title_translation = self._sourced_dict(
source,
title=title,
language=language,
)
self._append_to('title_translations', title_translation)
|
Add title translation.
:param title: translated title
:type title: string
:param language: language for the original title
:type language: string (2 characters ISO639-1)
:param source: source for the given title
:type source: string
|
def getClassInPackageFromName(className, pkg):
n = getAvClassNamesInPackage(pkg)
i = n.index(className)
c = getAvailableClassesInPackage(pkg)
return c[i]
|
get a class from name within a package
|
def init_app(self, app, add_context_processor=True):
if not hasattr(app, 'login_manager'):
self.login_manager.init_app(
app,
add_context_processor=add_context_processor)
self.login_manager.login_message = None
self.login_manager.needs_refresh_message = None
self.login_manager.unauthorized_handler(self.unauthorized_callback)
|
Initialize with app configuration
|
def available_perm_status(user):
roles = get_user_roles(user)
permission_hash = {}
for role in roles:
permission_names = role.permission_names_list()
for permission_name in permission_names:
permission_hash[permission_name] = get_permission(
permission_name) in user.user_permissions.all()
return permission_hash
|
Get a boolean map of the permissions available to a user
based on that user's roles.
|
async def register_storage_library(storage_type: str, c_library: str, entry_point: str) -> None:
LOGGER.debug(
'WalletManager.register_storage_library >>> storage_type %s, c_library %s, entry_point %s',
storage_type,
c_library,
entry_point)
try:
stg_lib = CDLL(c_library)
result = stg_lib[entry_point]()
if result:
LOGGER.debug(
'WalletManager.register_storage_library <!< indy error code %s on storage library entry at %s',
result,
entry_point)
raise IndyError(result)
LOGGER.info('Loaded storage library type %s (%s)', storage_type, c_library)
except IndyError as x_indy:
LOGGER.debug(
'WalletManager.register_storage_library <!< indy error code %s on load of storage library %s %s',
x_indy.error_code,
storage_type,
c_library)
raise
LOGGER.debug('WalletManager.register_storage_library <<<')
|
Load a wallet storage plug-in.
An indy-sdk wallet storage plug-in is a shared library; relying parties must explicitly
load it before creating or opening a wallet with the plug-in.
The implementation loads the dynamic library and calls its entry point; internally,
the plug-in registers itself with the indy-sdk wallet API
async def register_wallet_storage_library(storage_type: str, c_library: str, fn_pfx: str).
:param storage_type: wallet storage type
:param c_library: plug-in library
:param entry_point: function to initialize the library
|
def verify(self):
if self._is_verified:
return
for proxy in self._proxies.values():
proxy.verify()
self._is_verified = True
|
Verifies expectations on all doubled objects.
:raise: ``MockExpectationError`` on the first expectation that is not satisfied, if any.
|
def name(self):
if self._name:
return self._name
return [
line.strip() for line in self.__doc__.split("\n")
if line.strip()][0]
|
Use the first line of the docstring unless a name is set.
|
async def async_put_state(self, field: str, data: dict) -> dict:
session = self.session.put
url = self.api_url + field
jsondata = json.dumps(data)
response_dict = await async_request(session, url, data=jsondata)
return response_dict
|
Set state of object in deCONZ.
Field is a string representing a specific device in deCONZ
e.g. field='/lights/1/state'.
Data is a json object with what data you want to alter
e.g. data={'on': True}.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
|
def save_translations(self, instance, translated_data):
for meta in self.Meta.model._parler_meta:
translations = translated_data.get(meta.rel_name, {})
for lang_code, model_fields in translations.items():
translation = instance._get_translated_model(lang_code, auto_create=True, meta=meta)
for field, value in model_fields.items():
setattr(translation, field, value)
instance.save_translations()
|
Save translation data into translation objects.
|
def list(self, wg_uuid, parent=None, flat=False, node_types=None):
url = "%(base)s/%(wg_uuid)s/nodes" % {
'base': self.local_base_url,
'wg_uuid': wg_uuid
}
param = []
if parent:
if isinstance(parent, (list,)):
if len(parent) >= 1:
parent = parent[-1]
param.append(("parent", parent))
if flat:
param.append(("flat", True))
if node_types:
for node_type in node_types:
param.append(("type", node_type))
encode = urllib.urlencode(param)
if encode:
url += "?"
url += encode
return self.core.list(url)
|
Get a list of workgroup nodes.
|
def pluck(self, column):
result = self.first([column])
if result:
return result[column]
|
Pluck a single column from the database.
:param column: The column to pluck
:type column: str
:return: The column value
:rtype: mixed
|
def register_value_producer(self, value_name: str, source: Callable[..., pd.DataFrame]=None,
preferred_combiner: Callable=replace_combiner,
preferred_post_processor: Callable[..., pd.DataFrame]=None) -> Pipeline:
return self._value_manager.register_value_producer(value_name, source,
preferred_combiner,
preferred_post_processor)
|
Marks a ``Callable`` as the producer of a named value.
Parameters
----------
value_name :
The name of the new dynamic value pipeline.
source :
A callable source for the dynamic value pipeline.
preferred_combiner :
A strategy for combining the source and the results of any calls to mutators in the pipeline.
``vivarium`` provides the strategies ``replace_combiner`` (the default), ``list_combiner``,
and ``set_combiner`` which are importable from ``vivarium.framework.values``. Client code
may define additional strategies as necessary.
preferred_post_processor :
A strategy for processing the final output of the pipeline. ``vivarium`` provides the strategies
``rescale_post_processor`` and ``joint_value_post_processor`` which are importable from
``vivarium.framework.values``. Client code may define additional strategies as necessary.
Returns
-------
Callable
A callable reference to the named dynamic value pipeline.
|
def initbinset(self,binset=None):
if binset is None:
msg="(%s) does not have a defined binset in the wavecat table. The waveset of the spectrum will be used instead."%str(self.bandpass)
try:
self.binwave = self.bandpass.binset
except (KeyError, AttributeError):
self.binwave = self.spectrum.wave
print(msg)
if self.binwave is None:
self.binwave = self.spectrum.wave
print(msg)
else:
self.binwave=binset
|
Set ``self.binwave``.
By default, wavelength values for binning are inherited
from bandpass. If the bandpass has no binning information,
then source spectrum wavelengths are used. However, if
user provides values, then those are used without question.
Parameters
----------
binset : array_like or `None`
Wavelength values to be used for binning when converting to counts.
|
async def _auth_plain(self, username, password):
mechanism = "PLAIN"
credentials = "\0{}\0{}".format(username, password)
encoded_credentials = SMTP.b64enc(credentials)
try:
code, message = await self.do_cmd(
"AUTH", mechanism, encoded_credentials, success=(235, 503)
)
except SMTPCommandFailedError as e:
raise SMTPAuthenticationError(e.code, e.message, mechanism)
return code, message
|
Performs an authentication attempt using the PLAIN mechanism.
Protocol:
1. Format the username and password in a suitable way;
2. The formatted string is base64-encoded;
3. The string 'AUTH PLAIN' and a space character are prepended to
the base64-encoded username and password and sent to the
server;
4. If the server replies with a 235 return code, the user is
authenticated.
Args:
username (str): Identifier of the user trying to authenticate.
password (str): Password for the user.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedly lost.
SMTPAuthenticationError: If the authentication attempt fails.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response.
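As a standalone illustration of steps 1-2 (building and base64-encoding the PLAIN credentials; the server exchange itself is omitted and the helper name is made up)::

    import base64

    def plain_credentials(username, password):
        # step 1: NUL-separated authorization string (authzid left empty)
        credentials = "\0{}\0{}".format(username, password)
        # step 2: base64-encode; this is what follows "AUTH PLAIN " on the wire
        return base64.b64encode(credentials.encode("utf-8")).decode("ascii")

    # plain_credentials("alice", "secret") -> 'AGFsaWNlAHNlY3JldA=='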
|
def find(self, node, path):
return node.find(path, namespaces=self.namespaces)
|
Wrapper for lxml's find.
|
def querydict_to_multidict(query_dict, wrap=None):
wrap = wrap or (lambda val: val)
return MultiDict(chain.from_iterable(
six.moves.zip(repeat(key), (wrap(v) for v in vals))
for key, vals in six.iterlists(query_dict)
))
|
Returns a new `webob.MultiDict` from a `django.http.QueryDict`.
If `wrap` is provided, it's used to wrap the values.
|
def reverse_complement_sequences(records):
logging.info('Applying _reverse_complement_sequences generator: '
'transforming sequences into reverse complements.')
for record in records:
rev_record = SeqRecord(record.seq.reverse_complement(),
id=record.id, name=record.name,
description=record.description)
_reverse_annotations(record, rev_record)
yield rev_record
|
Transform sequences into reverse complements.
|
def table_from_cwb(source, *args, **kwargs):
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs)
|
Read an `EventTable` from a Coherent WaveBurst ROOT file
This function just redirects to the format='root' reader with appropriate
defaults.
|
def command_exists(command):
for category, commands in iteritems(command_categories):
for existing_command in commands:
if existing_command.match(command):
return True
return False
|
Check whether the given command was registered; in other words, whether
it exists.
|
async def process_updates(self, updates, fast: typing.Optional[bool] = True):
if fast:
tasks = []
for update in updates:
tasks.append(self.updates_handler.notify(update))
return await asyncio.gather(*tasks)
results = []
for update in updates:
results.append(await self.updates_handler.notify(update))
return results
|
Process a list of updates.
:param updates: list of updates to process
:param fast: if True, dispatch all updates concurrently; otherwise handle them one by one
:return: list of results from the updates handler
|
def color_for_level(level):
if not color_available:
return None
return {
logging.DEBUG: colorama.Fore.WHITE,
logging.INFO: colorama.Fore.BLUE,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Fore.MAGENTA
}.get(level, colorama.Fore.WHITE)
|
Returns the colorama Fore color for a given log level.
If color is not available, returns None.
|
def absent(name,
vhost='/',
runas=None):
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
policy_exists = __salt__['rabbitmq.policy_exists'](
vhost, name, runas=runas)
if not policy_exists:
ret['comment'] = 'Policy \'{0} {1}\' is not present.'.format(vhost, name)
return ret
if not __opts__['test']:
result = __salt__['rabbitmq.delete_policy'](vhost, name, runas=runas)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
return ret
elif 'Deleted' in result:
ret['comment'] = 'Deleted'
ret['changes'] = {'new': '', 'old': name}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Policy \'{0} {1}\' will be removed.'.format(vhost, name)
return ret
|
Ensure the named policy is absent
Reference: http://www.rabbitmq.com/ha.html
name
The name of the policy to remove
vhost
Virtual host on which the policy is defined (default: '/')
runas
Name of the user to run the command as
|
def remove_sonos_playlist(self, sonos_playlist):
object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
return self.contentDirectory.DestroyObject([('ObjectID', object_id)])
|
Remove a Sonos playlist.
Args:
sonos_playlist (DidlPlaylistContainer): Sonos playlist to remove
or the item_id (str).
Returns:
bool: True if successful, False otherwise
Raises:
SoCoUPnPException: If sonos_playlist does not point to a valid
object.
|
def OnActivateReader(self, reader):
SimpleSCardAppEventObserver.OnActivateReader(self, reader)
self.feedbacktext.SetLabel('Activated reader: ' + repr(reader))
|
Called when a reader is activated by double-clicking on the
reader tree control or toolbar.
|
def close(self):
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
self._socket.close()
|
Stop the client.
|
def with_respect_to(self):
try:
name = self.order_with_respect_to
value = getattr(self, name)
except AttributeError:
return {}
field = getattr(self.__class__, name)
if isinstance(field, GenericForeignKey):
names = (field.ct_field, field.fk_field)
return dict([(n, getattr(self, n)) for n in names])
return {name: value}
|
Returns a dict to use as a filter for ordering operations
containing the original ``Meta.order_with_respect_to`` value
if provided. If the field is a Generic Relation, the dict
returned contains names and values for looking up the
relation's ``ct_field`` and ``fk_field`` attributes.
|
def get_unit_property(self, unit_id, property_name):
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_properties:
self._unit_properties[unit_id] = {}
if isinstance(property_name, str):
if property_name in list(self._unit_properties[unit_id].keys()):
return self._unit_properties[unit_id][property_name]
else:
raise ValueError(str(property_name) + " has not been added to unit " + str(unit_id))
else:
raise ValueError(str(property_name) + " must be a string")
else:
raise ValueError(str(unit_id) + " is not a valid unit_id")
else:
raise ValueError(str(unit_id) + " must be an int")
|
This function returns the data stored under the given property name
for the given unit.
Parameters
----------
unit_id: int
The unit id for which the property will be returned
property_name: str
The name of the property
Returns
----------
value
The data associated with the given property name. Could be many
formats as specified by the user.
|
def clear(self, key=None):
if not self.options.enabled:
return CACHE_DISABLED
logger.debug('clear(key={})'.format(repr(key)))
if key is not None and key in self._dict.keys():
del self._dict[key]
logger.info('cache cleared for key: ' + repr(key))
elif not key:
for cached_key in [k for k in self._dict.keys()]:
del self._dict[cached_key]
logger.info('cache cleared for ALL keys')
return True
|
Clear a cache entry, or the entire cache if no key is given
Returns CACHE_DISABLED if the cache is disabled
Returns True on successful operation
:param key: optional key to limit the clear operation to (defaults to None)
|
def _get_response(self, connection):
response_header = self._receive(connection, 13)
logger.debug('Response header: %s', response_header)
if (not response_header.startswith(b'ZBXD\x01') or
len(response_header) != 13):
logger.debug('Zabbix returned an invalid response.')
result = False
else:
response_len = struct.unpack('<Q', response_header[5:])[0]
response_body = connection.recv(response_len)
result = json.loads(response_body.decode("utf-8"))
logger.debug('Data received: %s', result)
try:
connection.close()
except Exception as err:
pass
return result
|
Get response from zabbix server, reads from self.socket.
:type connection: :class:`socket._socketobject`
:param connection: Socket to read.
:rtype: dict
:return: Response from zabbix server or False in case of error.
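For reference, a self-contained sketch of the 13-byte header layout this method expects (the 5-byte protocol constant ``b'ZBXD\x01'`` followed by an 8-byte little-endian payload length)::

    import json
    import struct

    # build a response the way the server would
    body = json.dumps({"response": "success"}).encode("utf-8")
    header = b"ZBXD\x01" + struct.pack("<Q", len(body))

    # parse it the way _get_response does
    assert header.startswith(b"ZBXD\x01") and len(header) == 13
    response_len = struct.unpack("<Q", header[5:])[0]
    print(response_len == len(body))  # True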
|
def byte_adaptor(fbuffer):
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer
|
Provides Python 3 compatibility by converting a byte-based
file stream to a string-based file stream.
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
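A quick demonstration with an in-memory byte stream; on Python 3, ``six.StringIO`` resolves to ``io.StringIO``, so the standard library alone is enough here::

    import io

    fbuffer = io.BytesIO(b"symbol,price\nINFY,1500\n")
    # decode the byte stream and re-wrap it as a text stream, as byte_adaptor does on Python 3
    text_buffer = io.StringIO(fbuffer.read().decode("latin-1"))
    print(text_buffer.readline().strip())  # symbol,price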
|
def find_one(self, filter=None, fields=None, skip=0, sort=None):
result = self.find(filter=filter, fields=fields, skip=skip, limit=1, sort=sort)
if len(result) > 0:
return result[0]
else:
return None
|
Similar to find. This method will only retrieve one row.
If no row matches, returns None
|
def image(cam):
yield marv.set_header(title=cam.topic)
msg = yield marv.pull(cam)
if msg is None:
return
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
imgfile = yield marv.make_file(name)
img = imgmsg_to_cv2(rosmsg, "rgb8")
cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
yield marv.push(imgfile)
|
Extract first image of input stream to jpg file.
Args:
cam: Input stream of raw rosbag messages.
Returns:
File instance for first image of input stream.
|
def file_content_list(self, project):
project_list = False
self.load_project_flag_list_file(il.get('project_exceptions'), project)
try:
flag_list = (fl['file_audits']['file_contents'])
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
ignore_list = il['file_audits']['file_contents']
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
project_list = fl['file_audits'][project]['file_contents']
logger.info('Loaded %s specific file_contents entries', project)
except KeyError:
logger.info('No project specific file_contents section for project %s', project)
if project_list:
ignore_list_merge = project_list + ignore_list
ignore_list_re = re.compile("|".join(ignore_list_merge), flags=re.IGNORECASE)
return flag_list, ignore_list_re
else:
ignore_list_re = re.compile("|".join(ignore_list),
flags=re.IGNORECASE)
return flag_list, ignore_list_re
|
Gathers the file_contents flag strings and the compiled ignore-list regex for the given project.
|
def rytov_sc(radius=5e-6, sphere_index=1.339, medium_index=1.333,
wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),
center=(39.5, 39.5), radius_sampling=42):
r_ryt, n_ryt = correct_rytov_sc_input(radius_sc=radius,
sphere_index_sc=sphere_index,
medium_index=medium_index,
radius_sampling=radius_sampling)
qpi = mod_rytov.rytov(radius=r_ryt,
sphere_index=n_ryt,
medium_index=medium_index,
wavelength=wavelength,
pixel_size=pixel_size,
grid_size=grid_size,
center=center,
radius_sampling=radius_sampling)
qpi["sim radius"] = radius
qpi["sim index"] = sphere_index
qpi["sim model"] = "rytov-sc"
return qpi
|
r"""Field behind a dielectric sphere, systematically corrected Rytov
This method implements a correction of
:func:`qpsphere.models.rytov`, where the
`radius` :math:`r_\text{Ryt}` and the `sphere_index`
:math:`n_\text{Ryt}` are corrected using
the approach described in :cite:`Mueller2018` (eqns. 3,4, and 5).
.. math::
    n_\text{Ryt-SC} &= n_\text{Ryt} + n_\text{med} \cdot
    \left( a_n x^2 + b_n x + c_n \right) \\
    r_\text{Ryt-SC} &= r_\text{Ryt} \cdot
    \left( a_r x^2 + b_r x + c_r \right) \\
    &\text{with } x = \frac{n_\text{Ryt}}{n_\text{med}} - 1
The correction factors are given in
:data:`qpsphere.models.mod_rytov_sc.RSC_PARAMS`.
Parameters
----------
radius: float
Radius of the sphere [m]
sphere_index: float
Refractive index of the sphere
medium_index: float
Refractive index of the surrounding medium
wavelength: float
Vacuum wavelength of the imaging light [m]
pixel_size: float
Pixel size [m]
grid_size: tuple of floats
Resulting image size in x and y [px]
center: tuple of floats
Center position in image coordinates [px]
radius_sampling: int
Number of pixels used to sample the sphere radius when
computing the Rytov field. The default value of 42
pixels is a reasonable number for single-cell analysis.
Returns
-------
qpi: qpimage.QPImage
Quantitative phase data set
|
def validate_word(self, word):
while word:
match = self.seg_regex.match(word)
if match:
word = word[len(match.group(0)):]
else:
return False
return True
|
Returns True if `word` consists exhaustively of valid IPA segments
Args:
word (unicode): input word as Unicode IPA string
Returns:
bool: True if `word` can be divided exhaustively into IPA segments
that exist in the database
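A toy, self-contained version of the same greedy-matching loop with a made-up segment pattern (single lowercase letters, optionally followed by ``:``), just to show the control flow::

    import re

    class ToyValidator:
        seg_regex = re.compile(r"[a-z]:?")

        def validate_word(self, word):
            # peel matched segments off the front until the word is exhausted
            while word:
                match = self.seg_regex.match(word)
                if not match:
                    return False
                word = word[len(match.group(0)):]
            return True

    print(ToyValidator().validate_word("ab:c"))  # True
    print(ToyValidator().validate_word("a9"))    # False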
|
def did_you_mean(unknown_command, entry_points):
from difflib import SequenceMatcher
similarity = lambda x: SequenceMatcher(None, x, unknown_command).ratio()
did_you_mean = sorted(entry_points, key=similarity, reverse=True)
return did_you_mean[0]
|
Return the command with the name most similar to what the user typed. This
is used to suggest a correct command when the user types an illegal
command.
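A runnable restatement with a concrete call, assuming nothing beyond the standard library::

    from difflib import SequenceMatcher

    def did_you_mean(unknown_command, entry_points):
        similarity = lambda x: SequenceMatcher(None, x, unknown_command).ratio()
        return sorted(entry_points, key=similarity, reverse=True)[0]

    print(did_you_mean("instal", ["install", "uninstall", "list"]))  # install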
|
def block_specification_to_number(block: BlockSpecification, web3: Web3) -> BlockNumber:
if isinstance(block, str):
msg = f"string block specification can't contain {block}"
assert block in ('latest', 'pending'), msg
number = web3.eth.getBlock(block)['number']
elif isinstance(block, T_BlockHash):
number = web3.eth.getBlock(block)['number']
elif isinstance(block, T_BlockNumber):
number = block
else:
if __debug__:
raise AssertionError(f'Unknown type {type(block)} given for block specification')
return BlockNumber(number)
|
Converts a block specification to an actual block number
|
def from_requirement(cls, provider, requirement, parent):
candidates = provider.find_matches(requirement)
if not candidates:
raise NoVersionsAvailable(requirement, parent)
return cls(
candidates=candidates,
information=[RequirementInformation(requirement, parent)],
)
|
Build an instance from a requirement.
|
def job(name, **kwargs):
return task(name=name, schedulable=True, base=JobTask,
bind=True, **kwargs)
|
A shortcut decorator for declaring jobs
|
def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False):
from os.path import dirname, basename, join, exists
initname = '__init__.py'
mainname = '__main__.py'
if modname in sys.modules:
modpath = sys.modules[modname].__file__.replace('.pyc', '.py')
else:
import pkgutil
loader = pkgutil.find_loader(modname)
modpath = loader.filename.replace('.pyc', '.py')
if '.' not in basename(modpath):
modpath = join(modpath, initname)
if prefer_pkg:
if modpath.endswith(initname) or modpath.endswith(mainname):
modpath = dirname(modpath)
if prefer_main:
if modpath.endswith(initname):
main_modpath = modpath[:-len(initname)] + mainname
if exists(main_modpath):
modpath = main_modpath
return modpath
|
Same as get_modpath but doesn't import the module directly.
SeeAlso:
get_modpath
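A rough, hypothetical equivalent for the not-yet-imported case using ``importlib`` instead of ``pkgutil.find_loader`` (which is deprecated in recent Python versions); it skips the ``prefer_pkg``/``prefer_main`` handling::

    import importlib.util
    import sys

    def modpath_from_modname(modname):
        # already-imported modules expose __file__ directly
        if modname in sys.modules:
            return sys.modules[modname].__file__
        spec = importlib.util.find_spec(modname)
        return spec.origin  # for packages this is the path to __init__.py

    print(modpath_from_modname("json"))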
|
def _close_app(app, mongo_client, client):
app.stop()
client.close()
mongo_client.close()
|
Ensures that the app is properly closed
|
def htmresearchCorePrereleaseInstalled():
try:
coreDistribution = pkg_resources.get_distribution("htmresearch-core")
if pkg_resources.parse_version(coreDistribution.version).is_prerelease:
return True
except pkg_resources.DistributionNotFound:
pass
return False
|
Make an attempt to determine if a pre-release version of htmresearch-core is
installed already.
@return: boolean
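The prerelease test itself comes from ``pkg_resources`` version parsing (shipped with setuptools), for example::

    from pkg_resources import parse_version

    print(parse_version("1.0.0.dev42").is_prerelease)  # True
    print(parse_version("1.0.0").is_prerelease)        # False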
|
def cursor_after(self):
if isinstance(self._cursor_after, BaseException):
raise self._cursor_after
return self._cursor_after
|
Return the cursor after the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
|
def unregister_listener(self, address, func):
listeners = self.address_listeners[address]
if listeners is None:
return False
if func in listeners:
listeners.remove(func)
return True
return False
|
Removes a listener function for a given address
Remove the listener for the given address. Returns true if the listener
was found and removed, false otherwise
|
def _format_generic(lines, element, printed, spacer=""):
for doc in element.docstring:
if doc.doctype.lower() not in printed:
lines.append(spacer + doc.__str__())
|
Generically formats all remaining docstrings and custom XML
tags that don't appear in the list of already printed documentation.
:arg printed: a list of XML tags for the element that have already
been handled by a higher method.
|
def wash_html_id(dirty):
import re
if not dirty[0].isalpha():
dirty = 'i' + dirty
non_word = re.compile(r'[^\w]+')
return non_word.sub('', dirty)
|
Strip non-word characters (anything other than letters, digits, and underscores) from a given string.
The result can be used as an HTML element ID (also with jQuery and in all browsers).
:param dirty: the string to wash
:returns: the HTML ID ready string
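A short usage sketch, assuming the function above is in scope::

    print(wash_html_id("2-column layout!"))  # i2columnlayout
    print(wash_html_id("main nav"))          # mainnav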
|
def _build_sentence(word):
return (
"(?:{word}|[{non_stops}]|(?<![{stops} ]) )+"
"[{stops}]['\"\]\}}\)]*"
).format(word=word, non_stops=non_stops.replace('-', '\-'),
stops=stops)
|
Builds a Pinyin sentence re pattern from a Pinyin word re pattern.
A sentence is defined as a series of valid Pinyin words, punctuation
(non-stops), and spaces followed by a single stop and zero or more
container-closing punctuation marks (e.g. apostrophe and brackets).
|
def get_available_options(self, service_name):
options = {}
for data_dir in self.data_dirs:
service_glob = "{0}-*.json".format(service_name)
path = os.path.join(data_dir, service_glob)
found = glob.glob(path)
for match in found:
base = os.path.basename(match)
bits = os.path.splitext(base)[0].split('-', 1)
if len(bits) < 2:
continue
api_version = bits[1]
options.setdefault(api_version, [])
options[api_version].append(match)
return options
|
Fetches a collection of all JSON files for a given service.
This checks user-created files (if present) as well as including the
default service files.
Example::
>>> loader.get_available_options('s3')
{
'2013-11-27': [
'~/.boto-overrides/s3-2013-11-27.json',
'/path/to/kotocore/data/aws/resources/s3-2013-11-27.json',
],
'2010-10-06': [
'/path/to/kotocore/data/aws/resources/s3-2010-10-06.json',
],
'2007-09-15': [
'~/.boto-overrides/s3-2007-09-15.json',
],
}
:param service_name: The name of the desired service
:type service_name: string
:returns: A dictionary of api_version keys, with a list of filepaths
for that version (in preferential order).
:rtype: dict
|
def service_desks(self):
url = self._options['server'] + '/rest/servicedeskapi/servicedesk'
headers = {'X-ExperimentalApi': 'opt-in'}
r_json = json_loads(self._session.get(url, headers=headers))
projects = [ServiceDesk(self._options, self._session, raw_project_json)
for raw_project_json in r_json['values']]
return projects
|
Get a list of ServiceDesk Resources from the server visible to the current authenticated user.
:rtype: List[ServiceDesk]
|
def sign(self, private_keys):
if private_keys is None or not isinstance(private_keys, list):
raise TypeError('`private_keys` must be a list instance')
def gen_public_key(private_key):
public_key = private_key.get_verifying_key().encode()
return public_key.decode()
key_pairs = {gen_public_key(PrivateKey(private_key)):
PrivateKey(private_key) for private_key in private_keys}
tx_dict = self.to_dict()
tx_dict = Transaction._remove_signatures(tx_dict)
tx_serialized = Transaction._to_str(tx_dict)
for i, input_ in enumerate(self.inputs):
self.inputs[i] = self._sign_input(input_, tx_serialized, key_pairs)
self._hash()
return self
|
Fulfills a previous Transaction's Output by signing Inputs.
Note:
This method works only for the following Cryptoconditions
currently:
- Ed25519Fulfillment
- ThresholdSha256
Furthermore, note that all keys required to fully sign the
Transaction have to be passed to this method. A subset of all
will cause this method to fail.
Args:
private_keys (:obj:`list` of :obj:`str`): A complete list of
all private keys needed to sign all Fulfillments of this
Transaction.
Returns:
:class:`~bigchaindb.common.transaction.Transaction`
|
def close(self):
self._check_device_status()
hidapi.hid_close(self._device)
self._device = None
|
Close connection to HID device.
Automatically run when a Device object is garbage-collected, though
manual invocation is recommended.
|
def dt2ts(dt):
assert isinstance(dt, (datetime.datetime, datetime.date))
ret = time.mktime(dt.timetuple())
if isinstance(dt, datetime.datetime):
ret += 1e-6 * dt.microsecond
return ret
|
Converts to float representing number of seconds since 1970-01-01 GMT.
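Note that ``time.mktime`` interprets the time tuple in local time; a small comparison with the UTC-based ``calendar.timegm`` (standard library only)::

    import calendar
    import datetime
    import time

    d = datetime.date(2020, 1, 1)
    print(time.mktime(d.timetuple()))      # local-time based, as dt2ts uses
    print(calendar.timegm(d.timetuple()))  # 1577836800, UTC-based alternative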
|
def _check_permission(self, name, obj=None):
def redirect_or_exception(ex):
if not self.request.user or not self.request.user.is_authenticated:
if self.auto_login_redirect:
redirect_to_login(self.request.get_full_path())
else:
raise HTTPUnauthorizedResponseException
else:
raise ex
try:
if not self._has_permission(name, obj):
redirect_or_exception(HTTPForbiddenResponseException)
except Http404 as ex:
redirect_or_exception(ex)
|
If the customer is not authorized, they should not learn whether the object exists.
Therefore a 403 is returned if the object was not found, or the customer is redirected to the login page.
If the customer is authorized and the object was not found, a 404 is returned.
If the object was found and the user is not authorized, a 403 is returned or the user is redirected to the login page.
If the object was found and the user is authorized, a 403 or 200 is returned according to the result of the _has_permission method.
|
def fetch(self, **kwargs) -> 'FetchContextManager':
assert self.method in self._allowed_methods, \
'Disallowed HTTP method: {}'.format(self.method)
self.date = datetime.now(tzutc())
self.headers['Date'] = self.date.isoformat()
if self.content_type is not None:
self.headers['Content-Type'] = self.content_type
full_url = self._build_url()
self._sign(full_url.relative())
rqst_ctx = self.session.aiohttp_session.request(
self.method,
str(full_url),
data=self._pack_content(),
timeout=_default_request_timeout,
headers=self.headers)
return FetchContextManager(self.session, rqst_ctx, **kwargs)
|
Sends the request to the server and reads the response.
You may use this method either with plain synchronous Session or
AsyncSession. Both of the following patterns are valid:
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import Session
with Session() as sess:
rqst = Request(sess, 'GET', ...)
with rqst.fetch() as resp:
print(resp.text())
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import AsyncSession
async with AsyncSession() as sess:
rqst = Request(sess, 'GET', ...)
async with rqst.fetch() as resp:
print(await resp.text())
|
def retrieve_data(self):
df = self.manager.get_historic_data(self.start.date(), self.end.date())
df.replace(0, np.nan, inplace=True)
return df
|
Retrieves data as a DataFrame.
|
def find_nearest(x, x0) -> Tuple[int, Any]:
x = np.asanyarray(x)
x0 = np.atleast_1d(x0)
if x.size == 0 or x0.size == 0:
raise ValueError('empty input(s)')
if x0.ndim not in (0, 1):
raise ValueError('2-D x0 not handled yet')
ind = np.empty_like(x0, dtype=int)
for i, xi in enumerate(x0):
if xi is not None and (isinstance(xi, (datetime.datetime, datetime.date, np.datetime64)) or np.isfinite(xi)):
ind[i] = np.nanargmin(abs(x-xi))
else:
raise ValueError('x0 must NOT be None or NaN to avoid surprising None return value')
return ind.squeeze()[()], x[ind].squeeze()[()]
|
This find_nearest function does NOT assume sorted input
inputs:
x: array (float, int, datetime, h5py.Dataset) within which to search for x0
x0: singleton or array of values to search for in x
outputs:
idx: index of flattened x nearest to x0 (i.e. works with higher than 1-D arrays also)
xidx: x[idx]
Observe how bisect.bisect() gives the incorrect result!
idea based on:
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
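A brief usage example, assuming the function above is importable::

    import numpy as np

    x = np.array([1.0, 3.0, 7.0, 12.0])
    idx, val = find_nearest(x, 6.2)
    print(idx, val)  # 2 7.0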
|
def submit(self, port_id, tuple_):
port_index = self._splpy_output_ports[port_id]
ec._submit(self, port_index, tuple_)
|
Submit a tuple to the output port.
The value to be submitted (``tuple_``) can be a ``None`` (nothing will be submitted),
``tuple``, ``dict`` or ``list`` of those types. For details
on how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`.
Args:
port_id: Identifier of the port specified in the
``output_ports`` parameter of the ``@spl.primitive_operator``
decorator.
tuple_: Tuple (or tuples) to be submitted to the output port.
|
def build_request_relationship(type, ids):
if ids is None:
return {
'data': None
}
elif isinstance(ids, str):
return {
'data': {'id': ids, 'type': type}
}
else:
return {
"data": [{"id": id, "type": type} for id in ids]
}
|
Build a relationship list.
A relationship list is used to update relationships between two
resources. Setting sensors on a label, for example, uses this
function to construct the list of sensor ids to pass to the Helium
API.
Args:
type(string): The resource type for the ids in the relationship
ids([uuid] or uuid): Just one or a list of resource uuids to use
in the relationship
Returns:
A ready to use relationship JSON object.
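The three return shapes, assuming the function above is in scope::

    build_request_relationship('sensor', None)
    # -> {'data': None}
    build_request_relationship('sensor', 'a1b2')
    # -> {'data': {'id': 'a1b2', 'type': 'sensor'}}
    build_request_relationship('sensor', ['a1b2', 'c3d4'])
    # -> {'data': [{'id': 'a1b2', 'type': 'sensor'}, {'id': 'c3d4', 'type': 'sensor'}]}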
|
def init_search(self):
if self.verbose:
logger.info("Initializing search.")
for generator in self.generators:
graph = generator(self.n_classes, self.input_shape).generate(
self.default_model_len, self.default_model_width
)
model_id = self.model_count
self.model_count += 1
self.training_queue.append((graph, -1, model_id))
self.descriptors.append(graph.extract_descriptor())
if self.verbose:
logger.info("Initialization finished.")
|
Call the generators to generate the initial architectures for the search.
|
def get_publisher(self):
doi_prefix = self.doi.split('/')[0]
try:
publisher_mod = openaccess_epub.publisher.import_by_doi(doi_prefix)
except ImportError as e:
log.exception(e)
return None
return publisher_mod.pub_class(self)
|
This method defines how the Article tries to determine the publisher of
the article.
This method relies on the success of the get_DOI method to fetch the
appropriate full DOI for the article. It then takes the DOI prefix
which corresponds to the publisher and then uses that to attempt to load
the correct publisher-specific code. This may fail; if the DOI is not
mapped to a code file, if the DOI is mapped but the code file could not
be located, or if the mapped code file is malformed then this method
will issue/log an informative error message and return None. This method
will not try to infer the publisher based on any metadata other than the
DOI of the article.
Returns
-------
publisher : Publisher instance or None
|
def file_copy(name, dest=None, **kwargs):
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.file_copy'](name, dest, **kwargs)
return ret
|
Copies the file from the local device to the junos device.
.. code-block:: yaml
/home/m2/info.txt:
junos:
- file_copy
- dest: info_copy.txt
Parameters:
Required
* src:
The source path where the file is kept.
* dest:
The destination path where the file will be copied.
|
def hook_drag(self):
widget = self.widget
widget.mousePressEvent = self.mousePressEvent
widget.mouseMoveEvent = self.mouseMoveEvent
widget.mouseReleaseEvent = self.mouseReleaseEvent
|
Install the hooks for drag operations.
|
def removeSubscribers(self, emails_list):
if not hasattr(emails_list, "__iter__"):
error_msg = "Input parameter 'emails_list' is not iterable"
self.log.error(error_msg)
raise exception.BadValue(error_msg)
missing_flags = True
headers, raw_data = self._perform_subscribe()
for email in emails_list:
missing_flag, raw_data = self._remove_subscriber(email, raw_data)
missing_flags = missing_flags and missing_flag
if missing_flags:
return
self._update_subscribe(headers, raw_data)
self.log.info("Successfully remove subscribers: %s for <Workitem %s>",
emails_list, self)
|
Remove subscribers from this workitem
If the subscribers have not been added, no more actions will be
performed.
:param emails_list: a :class:`list`/:class:`tuple`/:class:`set`
containing the subscribers' emails
|
def ellipsis(text, length, symbol="..."):
if len(text) > length:
pos = text.rfind(" ", 0, length)
if pos < 0:
return text[:length].rstrip(".") + symbol
else:
return text[:pos].rstrip(".") + symbol
else:
return text
|
Present a block of text of given length.
If the length of available text exceeds the requested length, truncate and
intelligently append an ellipsis.
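A short usage example, assuming the function above is in scope::

    print(ellipsis("The quick brown fox jumps over the lazy dog", 20))
    # -> 'The quick brown fox...'
    print(ellipsis("short text", 20))
    # -> 'short text'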
|
def load_data(self, data_np):
image = AstroImage.AstroImage(logger=self.logger)
image.set_data(data_np)
self.set_image(image)
|
Load raw numpy data into the viewer.
|
def update(self, **kwargs):
update_compute = False
old_json = self.__json__()
compute_properties = None
for prop in kwargs:
if getattr(self, prop) != kwargs[prop]:
if prop not in self.CONTROLLER_ONLY_PROPERTIES:
update_compute = True
if prop == "properties":
compute_properties = kwargs[prop]
else:
setattr(self, prop, kwargs[prop])
self._list_ports()
if old_json != self.__json__():
self.project.controller.notification.emit("node.updated", self.__json__())
if update_compute:
data = self._node_data(properties=compute_properties)
response = yield from self.put(None, data=data)
yield from self.parse_node_response(response.json)
self.project.dump()
|
Update the node on the compute server
:param kwargs: Node properties
|
def to_geojson(self, filename):
with open(filename, 'w') as fd:
json.dump(self.to_record(WGS84_CRS), fd)
|
Save vector as geojson.
|
def _make_concept(self, entity):
name = self._sanitize(entity['canonicalName'])
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg['type']: arg['value']['@id']
for arg in entity['arguments']}
return concept, metadata
|
Return Concept from a Hume entity.
|
def tip_fdr(a, alpha=0.05):
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection(pvals)
return fdrs
|
Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate
|
def run(self):
from zengine.lib.cache import WFSpecNames
if self.manager.args.clear:
self._clear_models()
return
if self.manager.args.wf_path:
paths = self.get_wf_from_path(self.manager.args.wf_path)
else:
paths = self.get_workflows()
self.count = 0
self.do_with_submit(self.load_diagram, paths, threads=self.manager.args.threads)
WFSpecNames().refresh()
print("%s BPMN file loaded" % self.count)
|
Read workflows, check whether each one has been updated, and
try to update it if there are no running instances of that workflow.
|
def make_and_return_path_from_path_and_folder_names(path, folder_names):
for folder_name in folder_names:
path += folder_name + '/'
try:
os.makedirs(path)
except FileExistsError:
pass
return path
|
For a given path, create a directory structure composed of a set of folders and return the path to the \
inner-most folder.
For example, if path='/path/to/folders', and folder_names=['folder1', 'folder2'], the directory created will be
'/path/to/folders/folder1/folder2/' and the returned path will be '/path/to/folders/folder1/folder2/'.
If the folders already exist, routine continues as normal.
Parameters
----------
path : str
The path where the directories are created.
folder_names : [str]
The names of the folders which are created in the path directory.
Returns
-------
path
A string specifying the path to the inner-most folder created.
Examples
--------
path = '/path/to/folders'
path = make_and_return_path_from_path_and_folder_names(path=path, folder_names=['folder1', 'folder2'])
|
def get_buffer(self):
if self.doc_to_update:
self.update_sources()
ES_buffer = self.action_buffer
self.clean_up()
return ES_buffer
|
Get buffer which needs to be bulked to elasticsearch
|
def traverse_inorder(self, leaves=True, internal=True):
for node in self.root.traverse_inorder(leaves=leaves, internal=internal):
yield node
|
Perform an inorder traversal of the ``Node`` objects in this ``Tree``
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
|
def cmd_watch(args):
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args
print("Watching %s" % mpstate.status.watch)
|
watch a mavlink packet pattern
|
def serialize_on_post_delete(sender, instance, using, **kwargs):
try:
wrapped_instance = site_offline_models.get_wrapped_instance(instance)
except ModelNotRegistered:
pass
else:
wrapped_instance.to_outgoing_transaction(using, created=False, deleted=True)
|
Creates a serialized OutgoingTransaction when
a model instance is deleted.
Skip those not registered.
|
def predict(self, X, nsamples=200, likelihood_args=()):
Ey, _ = self.predict_moments(X, nsamples, likelihood_args)
return Ey
|
Predict target values from Bayesian generalized linear regression.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d dimensions).
nsamples : int, optional
Number of samples for sampling the expected target values from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
|
def _load(self):
if os.path.exists(self.path):
root = ET.parse(self.path).getroot()
if (root.tag == "fortpy" and "mode" in root.attrib and
root.attrib["mode"] == "template" and "direction" in root.attrib and
root.attrib["direction"] == self.direction):
for v in _get_xml_version(root):
self.versions[v] = TemplateContents()
self._load_entries(root)
if "autoname" in root.attrib:
self.name = root.attrib["autoname"]
else:
msg.err("the specified template {} ".format(self.path) +
"is missing the mode and direction attributes.")
exit(1)
else:
msg.err("could not find the template {}.".format(self.path))
exit(1)
|
Extracts the XML template data from the file.
|
def get_extra(cls, name=None):
if not name:
return cls._extra_config
return cls._extra_config.get(name, None)
|
Gets extra configuration parameters.
These parameters should be loaded through load_extra or load_extra_data.
Args:
name: str, the name of the configuration data to load.
Returns:
A dictionary containing the requested configuration data. None if
data was never loaded under that name.
|
def take(self, num_instances: int = 1, timeout: Optional[float] = None) -> None:
if num_instances < 1:
raise ValueError(f"Process must request at least 1 instance; here requested {num_instances}.")
if num_instances > self.num_instances_total:
raise ValueError(
f"Process must request at most {self.num_instances_total} instances; here requested {num_instances}."
)
if _logger is not None:
self._log(INFO, "take", num_instances=num_instances, free=self.num_instances_free)
proc = Process.current()
if self._num_instances_free < num_instances:
proc.local.__num_instances_required = num_instances
try:
self._waiting.join(timeout)
finally:
del proc.local.__num_instances_required
self._num_instances_free -= num_instances
if _logger is not None and proc in self._usage:
self._log(WARNING, "take-again", already=self._usage[proc], more=num_instances)
self._usage.setdefault(proc, 0)
self._usage[proc] += num_instances
|
The current process reserves a certain number of instances. If there are not enough instances available, the
process is made to join a queue. When this method returns, the process holds the instances it has requested to
take.
:param num_instances:
Number of resource instances to take.
:param timeout:
If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and
leaves the queue forcibly. In such a situation, a :py:class:`Timeout` exception is raised on the process.
|
def _get_billing_cycle_number(self, billing_cycle):
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
|
Gets the 1-indexed number of the provided billing cycle relative to the initial billing cycle.
|
def unzip_file(source_file, dest_dir=None, mkdir=False):
if dest_dir is None:
dest_dir, fname = os.path.split(source_file)
elif not os.path.isdir(dest_dir):
if mkdir:
preparedir(dest_dir)
else:
created = preparedir(dest_dir, False)
if not created:
raise ValueError("Failed to find %s." % dest_dir)
with zipfile.ZipFile(source_file) as zf:
for member in zf.infolist():
words = member.filename.split('\\')
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''):
continue
dest_dir = os.path.join(dest_dir, word)
zf.extract(member, dest_dir)
|
Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
mkdir: Set to True to create the directory if doesn't exist (Default: False)
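A compact round-trip using only the standard library (file names are made up; note that this simple sketch uses ``extractall`` and skips the per-member path handling above)::

    import os
    import tempfile
    import zipfile

    workdir = tempfile.mkdtemp()
    archive = os.path.join(workdir, "points.zip")

    # create a small archive, then extract it next to itself
    with zipfile.ZipFile(archive, "w") as zf:
        zf.writestr("points/testPts.txt", "0,0,0\n1,1,1\n")
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(workdir)

    print(os.path.exists(os.path.join(workdir, "points", "testPts.txt")))  # True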
|
def _get_retention_policy_value(self):
if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower():
return self.RETAIN
elif self.RetentionPolicy.lower() == self.DELETE.lower():
return self.DELETE
elif self.RetentionPolicy.lower() not in self.retention_policy_options:
raise InvalidResourceException(self.logical_id,
"'{}' must be one of the following options: {}."
.format('RetentionPolicy', [self.RETAIN, self.DELETE]))
|
Sets the deletion policy on this resource. The default is 'Retain'.
:return: value for the DeletionPolicy attribute.
|
def fen(self, *, shredder: bool = False, en_passant: str = "legal", promoted: Optional[bool] = None) -> str:
return " ".join([
self.epd(shredder=shredder, en_passant=en_passant, promoted=promoted),
str(self.halfmove_clock),
str(self.fullmove_number)
])
|
Gets a FEN representation of the position.
A FEN string (e.g.,
``rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1``) consists
of the position part :func:`~chess.Board.board_fen()`, the
:data:`~chess.Board.turn`, the castling part
(:data:`~chess.Board.castling_rights`),
the en passant square (:data:`~chess.Board.ep_square`),
the :data:`~chess.Board.halfmove_clock`
and the :data:`~chess.Board.fullmove_number`.
:param shredder: Use :func:`~chess.Board.castling_shredder_fen()`
and encode castling rights by the file of the rook
(like ``HAha``) instead of the default
:func:`~chess.Board.castling_xfen()` (like ``KQkq``).
:param en_passant: By default, only fully legal en passant squares
are included (:func:`~chess.Board.has_legal_en_passant()`).
Pass ``fen`` to strictly follow the FEN specification
(always include the en passant square after a two-step pawn move)
or ``xfen`` to follow the X-FEN specification
(:func:`~chess.Board.has_pseudo_legal_en_passant()`).
:param promoted: Mark promoted pieces like ``Q~``. By default, this is
only enabled in chess variants where this is relevant.
|
def _exec_info(self):
if self._info is None:
self._info = self.client.exec_inspect(self.exec_id)
return self._info
|
Caching wrapper around client.exec_inspect
|
def get_field_setup_query(query, model, column_name):
if not hasattr(model, column_name):
rel_model = getattr(model, column_name.split(".")[0]).mapper.class_
query = query.join(rel_model)
return query, getattr(rel_model, column_name.split(".")[1])
else:
return query, getattr(model, column_name)
|
Help function for SQLA filters, checks for dot notation on column names.
If it exists, will join the query with the model
from the first part of the field name.
example:
Contact.created_by: if created_by is a User model,
it will be joined to the query.
|
def _completion_checker(async_id, context_id):
if not context_id:
logging.debug("Context for async %s does not exist", async_id)
return
context = FuriousContext.from_id(context_id)
marker = FuriousCompletionMarker.get_by_id(context_id)
if marker and marker.complete:
logging.info("Context %s already complete" % context_id)
return True
task_ids = context.task_ids
if async_id in task_ids:
task_ids.remove(async_id)
logging.debug("Loaded context.")
logging.debug(task_ids)
done, has_errors = _check_markers(task_ids)
if not done:
return False
_mark_context_complete(marker, context, has_errors)
return True
|
Check if all Async jobs within a Context have been run.
|
def _transliterate (self, text, outFormat):
result = []
text = self._preprocess(text)
i = 0
while i < len(text):
if text[i].isspace():
result.append(text[i])
i = i+1
else:
chr = self._getNextChar(text, i)
try:
result.append(self[chr].unichr)
except KeyError:
result.append(_unrecognised(chr))
i = i + len(chr)
return result
|
Transliterate the text to Unicode.
|
def close(self):
log.debug("%r: close", self)
self._closing = True
brokerclients, self.clients = self.clients, None
self._close_brokerclients(brokerclients.values())
self.reset_all_metadata()
return self.close_dlist or defer.succeed(None)
|
Permanently dispose of the client
- Immediately mark the client as closed, causing current operations to
fail with :exc:`~afkak.common.CancelledError` and future operations to
fail with :exc:`~afkak.common.ClientError`.
- Clear cached metadata.
- Close any connections to Kafka brokers.
:returns:
deferred that fires when all resources have been released
|
def serialize_upload(name, storage, url):
if isinstance(storage, LazyObject):
storage._setup()
cls = storage._wrapped.__class__
else:
cls = storage.__class__
return signing.dumps({
'name': name,
'storage': '%s.%s' % (cls.__module__, cls.__name__)
}, salt=url)
|
Serialize uploaded file by name and storage. Namespaced by the upload url.
|
def diffsp(self, col: str, serie: "iterable", name: str="Diff"):
try:
d = []
for i, row in self.df.iterrows():
v = (row[col]*100) / serie[i]
d.append(v)
self.df[name] = d
except Exception as e:
self.err(e, self._append, "Can not diff column from serie")
|
Add a diff column in percentage from a serie. The serie is
an iterable of the same length as the dataframe.
:param col: column to diff
:type col: str
:param serie: serie to diff from
:type serie: iterable
:param name: name of the diff col, defaults to "Diff"
:param name: str, optional
:example: ``ds.diffsp("Col 1", [1, 1, 4], "New col")``
|
def cancel_orders(self, order_ids: List[str]) -> List[str]:
orders_to_cancel = order_ids
self.log.debug(f'Canceling orders on {self.name}: ids={orders_to_cancel}')
cancelled_orders = []
if self.dry_run:
self.log.warning(f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}')
return orders_to_cancel
try:
if self.has_batch_cancel:
self._cancel_orders(orders_to_cancel)
# copy the ids before clearing, so the returned list is not emptied too
cancelled_orders.extend(orders_to_cancel)
orders_to_cancel.clear()
else:
# iterate over a copy: removing ids while enumerating would skip entries
for order_id in list(orders_to_cancel):
self._cancel_order(order_id)
cancelled_orders.append(order_id)
orders_to_cancel.remove(order_id)
except Exception as e:
msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}'
raise self.exception(OrderNotFound, msg, e) from e
self.log.info(f'Orders cancelled on {self.name}: ids={cancelled_orders}')
return cancelled_orders
|
Cancel multiple orders by a list of IDs.
|
def _copy_selection(self, *event):
if react_to_event(self.view, self.view.editor, event):
logger.debug("copy selection")
global_clipboard.copy(self.model.selection)
return True
|
Copies the current selection to the clipboard.
|
def folder(self) -> typing.Union[str, None]:
if 'folder' in self.data:
return self.data.get('folder')
elif self.project_folder:
if callable(self.project_folder):
return self.project_folder()
else:
return self.project_folder
return None
|
The folder, relative to the project source_directory, where the file
resides
:return:
|
def get_repository_configuration(id):
response = utils.checked_api_call(pnc_api.repositories, 'get_specific', id=id)
if response:
return response.content
|
Retrieve a specific RepositoryConfiguration
|
def del_graph(self, graph):
g = self.pack(graph)
self.sql('del_edge_val_graph', g)
self.sql('del_node_val_graph', g)
self.sql('del_edge_val_graph', g)
self.sql('del_edges_graph', g)
self.sql('del_nodes_graph', g)
self.sql('del_graph', g)
|
Delete all records to do with the graph
|