code | docstring
---|---|
def is_left(point0, point1, point2):
return ((point1[0] - point0[0]) * (point2[1] - point0[1])) - ((point2[0] - point0[0]) * (point1[1] - point0[1]))
|
Tests if a point is Left|On|Right of an infinite line.
Ported from the C++ version at http://geomalgorithms.com/a03-_inclusion.html
.. note:: This implementation only works in 2-dimensional space.
:param point0: Point P0
:param point1: Point P1
:param point2: Point P2
:return:
>0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
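A quick check of the sign convention (hypothetical calls, using the function above):
>>> is_left((0, 0), (1, 0), (0, 1))   # P2 above the x-axis: left
1
>>> is_left((0, 0), (1, 0), (2, 0))   # P2 on the line
0
>>> is_left((0, 0), (1, 0), (0, -1))  # P2 below the x-axis: right
-1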
|
def sign(self, consumer_secret, access_token_secret, method, url,
oauth_params, req_kwargs):
key = self._escape(consumer_secret) + b'&'
if access_token_secret:
key += self._escape(access_token_secret)
return key.decode()
|
Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict
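For reference, the PLAINTEXT signature is simply the two escaped secrets joined by '&'. A sketch of the expected output, assuming `_escape` leaves plain ASCII secrets unchanged (`signer` and `url` are hypothetical here):
>>> signer.sign(b'consumer-secret', b'token-secret', 'GET', url, {}, {})
'consumer-secret&token-secret'
>>> signer.sign(b'consumer-secret', None, 'GET', url, {}, {})
'consumer-secret&'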
|
def build_log_presenters(service_names, monochrome):
prefix_width = max_name_width(service_names)
def no_color(text):
return text
for color_func in cycle([no_color] if monochrome else colors.rainbow()):
yield LogPresenter(prefix_width, color_func)
|
Return an iterable of functions.
Each function can be used to format the log output of a container.
|
def get_xname(self, var, coords=None):
if coords is not None:
coord = self.get_variable_by_axis(var, 'x', coords)
if coord is not None and coord.name in var.dims:
return coord.name
dimlist = list(self.x.intersection(var.dims))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for x coordinate in the variable:"
"%s. I use %s" % (', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return dimlist[0]
return var.dims[-1]
|
Get the name of the x-dimension
This method gives the name of the x-dimension (which is not necessarily
the name of the coordinate if the variable has a coordinate attribute)
Parameters
----------
var: xarray.Variables
The variable to get the dimension for
coords: dict
The coordinates to use for checking the axis attribute. If None,
they are not used
Returns
-------
str
The coordinate name
See Also
--------
get_x
|
def process_file(filename, interval=None, lazy=False):
mp = MedscanProcessor()
mp.process_csxml_file(filename, interval, lazy)
return mp
|
Process a CSXML file for its relevant information.
Consider running the fix_csxml_character_encoding.py script in
indra/sources/medscan to fix any encoding issues in the input file before
processing.
Parameters
----------
filename : str
The csxml file, containing Medscan XML, to process
interval : (start, end) or None
Select the interval of documents to read, starting with the
`start`th document and ending before the `end`th document. If
either is None, the value is considered undefined. If the value
exceeds the bounds of available documents, it will simply be
ignored.
lazy : bool
If True, the statements will not be generated immediately, but rather
a generator will be formulated, and statements can be retrieved by
using `iter_statements`. If False, the `statements` attribute will be
populated immediately. Default is False.
Returns
-------
mp : MedscanProcessor
A MedscanProcessor object containing extracted statements
|
def stream(func):
@wraps(func)
def wrapped(manager, *args, **kwargs):
offset, limit = kwargs.pop('_offset', None), kwargs.pop('_limit', None)
qs = func(manager, *args, **kwargs)
if isinstance(qs, dict):
qs = manager.public(**qs)
elif isinstance(qs, (list, tuple)):
qs = manager.public(*qs)
if offset or limit:
qs = qs[offset:limit]
return qs.fetch_generic_relations()
return wrapped
|
Stream decorator to be applied to methods of an ``ActionManager`` subclass
Syntax::
from actstream.decorators import stream
from actstream.managers import ActionManager
class MyManager(ActionManager):
@stream
def foobar(self, ...):
...
|
def collmat(self, tau, deriv_order=0):
dummy = self.__call__(0.)
nbasis = dummy.shape[0]
tau = np.atleast_1d(tau)
if tau.ndim > 1:
raise ValueError("tau must be a list or a rank-1 array")
A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype )
f = self.diff(order=deriv_order)
for i,taui in enumerate(tau):
A[i,:] = f(taui)
return np.squeeze(A)
|
Compute collocation matrix.
Parameters:
tau:
Python list or rank-1 array, collocation sites
deriv_order:
int, >=0, order of derivative for which to compute the collocation matrix.
The default is 0, which means the function value itself.
Returns:
A:
if len(tau) > 1, rank-2 array such that
A[i,j] = D**deriv_order B_j(tau[i])
where
D**k = kth derivative (0 for function value itself)
if len(tau) == 1, rank-1 array such that
A[j] = D**deriv_order B_j(tau)
Example:
If the coefficients of a spline function are given in the vector c, then::
np.sum( A*c, axis=-1 )
will give a rank-1 array of function values at the sites tau[i] that were supplied
to `collmat`.
Similarly for derivatives (if the supplied `deriv_order` > 0).
|
def require_minimum_pandas_version():
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
|
Raise ImportError if minimum version of Pandas is not installed
|
def get_timestamp(self, **kwargs):
timestamp = kwargs.get('timestamp')
if not timestamp:
now = datetime.datetime.utcnow()
        timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond // 1000) + "Z"
return timestamp
|
Retrieves the timestamp for a given set of data
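A standalone sketch of the format produced when no timestamp is supplied:
import datetime

now = datetime.datetime.utcnow()
stamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond // 1000) + "Z"
print(stamp)  # e.g. 2019-07-04T12:34:56.789Z (UTC, millisecond precision)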
|
def EscapeWildcards(string):
precondition.AssertType(string, Text)
return string.replace("%", r"\%").replace("_", r"\_")
|
Escapes wildcard characters for strings intended to be used with `LIKE`.
Databases don't automatically escape wildcard characters ('%', '_'), so any
non-literal string that is passed to `LIKE` and is expected to match literally
has to be manually escaped.
Args:
string: A string to escape.
Returns:
An escaped string.
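A hypothetical usage sketch, escaping a literal value before interpolating it into a LIKE pattern:
>>> EscapeWildcards("100%_done")
'100\\%\\_done'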
|
def _replace_global_vars(xs, global_vars):
if isinstance(xs, (list, tuple)):
        return [_replace_global_vars(x, global_vars) for x in xs]
elif isinstance(xs, dict):
final = {}
for k, v in xs.items():
if isinstance(v, six.string_types) and v in global_vars:
v = global_vars[v]
final[k] = v
return final
else:
return xs
|
Replace globally shared names from input header with value.
The value of the `algorithm` item may be a pointer to a real
file specified in the `global` section. If found, replace with
the full value.
|
def _gist_is_preset(repo):
_, gistid = repo.split("/")
gist_template = "https://api.github.com/gists/{}"
gist_path = gist_template.format(gistid)
response = get(gist_path)
if response.status_code == 404:
return False
try:
data = response.json()
    except ValueError:
return False
files = data.get("files", {})
package = files.get("package.json", {})
try:
content = json.loads(package.get("content", ""))
    except ValueError:
return False
if content.get("type") != "bepreset":
return False
return True
|
Evaluate whether gist is a be preset
Arguments:
repo (str): username/id pair e.g. mottosso/2bb4651a05af85711cde
|
def set_ifo_tag(self,ifo_tag,pass_to_command_line=True):
self.__ifo_tag = ifo_tag
if pass_to_command_line:
self.add_var_opt('ifo-tag', ifo_tag)
|
Set the ifo tag that is passed to the analysis code.
@param ifo_tag: a string to identify one or more IFOs
@param pass_to_command_line: add ifo-tag as a variable option (bool).
|
def get_maximum_score_metadata(self):
metadata = dict(self._mdata['maximum_score'])
metadata.update({'existing_cardinal_values': self._my_map['maximumScore']})
return Metadata(**metadata)
|
Gets the metadata for the maximum score.
return: (osid.Metadata) - metadata for the maximum score
*compliance: mandatory -- This method must be implemented.*
|
def _valid_request_body(
self, cert_chain, signature, serialized_request_env):
decoded_signature = base64.b64decode(signature)
public_key = cert_chain.public_key()
request_env_bytes = serialized_request_env.encode(CHARACTER_ENCODING)
try:
public_key.verify(
decoded_signature, request_env_bytes,
self._padding, self._hash_algorithm)
except InvalidSignature as e:
raise VerificationException("Request body is not valid", e)
|
Validate the request body hash with signature.
This method checks if the hash value of the request body
matches with the hash value of the signature, decrypted using
certificate chain. A
:py:class:`VerificationException` is raised if there is a
mismatch.
:param cert_chain: Certificate chain to be validated
:type cert_chain: cryptography.x509.Certificate
:param signature: Encrypted signature of the request
:type signature: str
:param serialized_request_env: Raw request body
:type serialized_request_env: str
:raises: :py:class:`VerificationException` if certificate is
not valid
|
def alias_delete(indices, aliases, hosts=None, body=None, profile=None, source=None):
es = _get_instance(hosts, profile)
if source and body:
message = 'Either body or source should be specified but not both.'
raise SaltInvocationError(message)
if source:
body = __salt__['cp.get_file_str'](
source,
saltenv=__opts__.get('saltenv', 'base'))
try:
result = es.indices.delete_alias(index=indices, name=aliases)
return result.get('acknowledged', False)
except elasticsearch.exceptions.NotFoundError:
return True
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot delete alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error))
|
Delete an alias of an index
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_delete testindex_v1 testindex
|
def config_get(args):
r = fapi.get_workspace_config(args.project, args.workspace,
args.namespace, args.config)
fapi._check_response_code(r, 200)
return json.dumps(r.json(), indent=4, separators=(',', ': '),
sort_keys=True, ensure_ascii=False)
|
Retrieve a method config from a workspace and send it to stdout
|
def iter_batches(iterable, batch_size):
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, batch_size)
        try:
            yield chain([next(batchiter)], batchiter)
        except StopIteration:
            # PEP 479: don't let StopIteration leak from the generator.
            return
|
Given a sequence or iterable, yield batches from that iterable until it
runs out. Note that this function returns a generator, and also each
batch will be a generator.
:param iterable: The sequence or iterable to split into batches
:param int batch_size: The number of elements of `iterable` to iterate over
in each batch
>>> batches = iter_batches('abcdefghijkl', batch_size=5)
>>> list(next(batches))
['a', 'b', 'c', 'd', 'e']
>>> list(next(batches))
['f', 'g', 'h', 'i', 'j']
>>> list(next(batches))
['k', 'l']
>>> list(next(batches))
Traceback (most recent call last):
...
StopIteration
Warning: It is important to iterate completely over each batch before
requesting the next, or batch sizes will be truncated to 1. For example,
making a list of all batches before asking for the contents of each
will not work:
>>> batches = list(iter_batches('abcdefghijkl', batch_size=5))
>>> len(batches)
12
>>> list(batches[0])
['a']
However, making a list of each individual batch as it is received will
produce expected behavior (as shown in the first example).
|
def get_scan(self, source_id, scan_id):
target_url = self.client.get_url('SCAN', 'GET', 'single', {'source_id': source_id, 'scan_id': scan_id})
return self.client.get_manager(Scan)._get(target_url)
|
Get a Scan object
:rtype: Scan
|
def _load_start_paths(self):
" Start the Read-Eval-Print Loop. "
if self._startup_paths:
for path in self._startup_paths:
if os.path.exists(path):
with open(path, 'rb') as f:
code = compile(f.read(), path, 'exec')
six.exec_(code, self.get_globals(), self.get_locals())
else:
output = self.app.output
output.write('WARNING | File not found: {}\n\n'.format(path))
|
Load the startup files and execute them.
|
def __redirect_stdio_emit(self, value):
parent = self.parent()
while parent is not None:
try:
parent.redirect_stdio.emit(value)
except AttributeError:
parent = parent.parent()
else:
break
|
Searches through the parent tree to see if it is possible to emit the
redirect_stdio signal.
This logic allows testing the SearchInComboBox select_directory method
outside of the FindInFiles plugin.
|
def update_configurable(self, configurable_class, name, config):
configurable_class_name = configurable_class.__name__.lower()
logger.info(
"updating %s: '%s'", configurable_class_name, name
)
registry = self.registry_for(configurable_class)
if name not in registry:
        logger.warning(
"Tried to update unknown %s: '%s'",
configurable_class_name, name
)
self.add_configurable(
configurable_class,
configurable_class.from_config(name, config)
)
return
registry[name].apply_config(config)
hook = self.hook_for(configurable_class, "update")
if not hook:
return
def done(f):
try:
f.result()
except Exception:
logger.exception("Error updating configurable '%s'", name)
self.work_pool.submit(hook, name, config).add_done_callback(done)
|
Callback fired when a configurable instance is updated.
Looks up the existing configurable in the proper "registry" and calls
`apply_config()` on it.
If a method named "on_<configurable classname>_update" is defined it
is called in the work pool and passed the configurable's name, the old
config and the new config.
If the updated configurable is not present, `add_configurable()` is
called instead.
|
def mkdir(path, create_parent=True, check_if_exists=False):
cmd = _format_cmd('mkdir', path, _p=create_parent)
if check_if_exists:
return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, cmd)
return cmd
|
Generates a unix command line for creating a directory.
:param path: Directory path.
:type path: unicode | str
:param create_parent: Create parent directories, if necessary. Default is ``True``.
:type create_parent: bool
:param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run.
Default is ``False``.
:type check_if_exists: bool
:return: Unix shell command line.
:rtype: unicode | str
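For example (assuming `_format_cmd` renders a plain `mkdir -p <path>`):
>>> mkdir('/var/data')
'mkdir -p /var/data'
>>> mkdir('/var/data', check_if_exists=True)
'if [[ ! -d /var/data ]]; then mkdir -p /var/data; fi'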
|
def get_config(self, retrieve="all"):
    get_startup = retrieve in ("all", "startup")
    get_running = retrieve in ("all", "running")
    get_candidate = retrieve in ("all", "candidate")
    if get_running:
result = self._execute_command_with_vdom('show')
text_result = '\n'.join(result)
return {
'startup': u"",
'running': py23_compat.text_type(text_result),
'candidate': u"",
}
elif get_startup or get_candidate:
return {
'startup': u"",
'running': u"",
'candidate': u"",
}
|
get_config implementation for FortiOS.
|
def get_router_id(self, tenant_id, tenant_name):
router_id = None
if tenant_id in self.tenant_dict:
router_id = self.tenant_dict.get(tenant_id).get('router_id')
if not router_id:
router_list = self.os_helper.get_rtr_by_name(
'FW_RTR_' + tenant_name)
if len(router_list) > 0:
router_id = router_list[0].get('id')
return router_id
|
Retrieve the router ID.
|
def read(self, path, environ):
try:
inp = open(path, 'rb')
except FileNotFoundError as error:
if error.errno != 2:
raise
return None
parsing = parse_vexrc(inp, environ)
for heading, key, value in parsing:
heading = self.default_heading if heading is None else heading
if heading not in self.headings:
self.headings[heading] = OrderedDict()
self.headings[heading][key] = value
parsing.close()
|
Read data from file into this vexrc instance.
|
def as_view(cls, **initkwargs):
if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):
def force_evaluation():
raise RuntimeError(
'Do not evaluate the `.queryset` attribute directly, '
'as the result will be cached and reused between requests. '
'Use `.all()` or call `.get_queryset()` instead.'
)
cls.queryset._fetch_all = force_evaluation
cls.queryset._result_iter = force_evaluation
view = super(RestView, cls).as_view(**initkwargs)
view.cls = cls
return csrf_exempt(view)
|
Store the original class on the view function.
This allows us to discover information about the view when we do URL
reverse lookups. Used for breadcrumb generation.
|
def by_name(self, name, archived=False, limit=None, page=None):
if not archived:
path = _path(self.adapter)
else:
path = _path(self.adapter, 'archived')
return self._get(path, name=name, limit=limit, page=page)
|
Get adapter data by name.
|
def detach(gandi, resource, background, force):
resource = sorted(tuple(set(resource)))
if not force:
proceed = click.confirm('Are you sure you want to detach %s?' %
', '.join(resource))
if not proceed:
return
result = gandi.disk.detach(resource, background)
if background:
gandi.pretty_echo(result)
return result
|
Detach disks from the currently attached VM.
Resource can be a disk name or ID
|
def get_backend_tfvars_file(path, environment, region):
backend_filenames = gen_backend_tfvars_files(environment, region)
for name in backend_filenames:
if os.path.isfile(os.path.join(path, name)):
return name
return backend_filenames[-1]
|
Determine Terraform backend file.
|
def end(self):
if self.lastUrl is not None:
self.html.write(u'</li>\n')
if self.lastComic is not None:
self.html.write(u'</ul>\n')
self.html.write(u'</ul>\n')
self.addNavLinks()
self.html.close()
|
End HTML output.
|
def _lock(self, url: str, name: str, hash_: str):
self._new_lock.append({
'url': url,
'name': name,
'hash': hash_,
})
self._stale_files.pop(name, None)
|
Add details of the files downloaded to _new_lock so they can be saved to the lock file.
Also remove the path from _stale_files; whatever remains at the end is therefore stale and can be deleted.
|
def slack_user(request, api_data):
if request.user.is_anonymous:
return request, api_data
data = deepcopy(api_data)
slacker, _ = SlackUser.objects.get_or_create(slacker=request.user)
slacker.access_token = data.pop('access_token')
slacker.extras = data
slacker.save()
messages.add_message(request, messages.SUCCESS, 'Your account has been successfully updated with '
'Slack. You can share your messages within your slack '
'domain.')
return request, api_data
|
Pipeline for backward compatibility with versions prior to 1.0.0,
in case you're willing to maintain the `slack_user` table.
|
def _build_amps_list(self, amp_value, processlist):
ret = []
try:
for p in processlist:
add_it = False
if (re.search(amp_value.regex(), p['name']) is not None):
add_it = True
else:
for c in p['cmdline']:
if (re.search(amp_value.regex(), c) is not None):
add_it = True
break
if add_it:
ret.append({'pid': p['pid'],
'cpu_percent': p['cpu_percent'],
'memory_percent': p['memory_percent']})
except (TypeError, KeyError) as e:
logger.debug("Can not build AMPS list ({})".format(e))
return ret
|
Return the AMPS process list according to the amp_value.
Monitored application processes are matched by a regular expression.
|
def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):
resnums = ssbio.utils.force_list(resnums)
alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1, seqprop2=seqprop2)
mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(resnums=resnums,
a_aln=alignment[0],
b_aln=alignment[1])
return mapped
|
Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information.
Args:
resnums (int, list): Residue numbers in seqprop1
seqprop1 (SeqProp): SeqProp object the resnums match to
seqprop2 (SeqProp): SeqProp object you want to map the resnums to
Returns:
dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this
dictionary, that means the residue number cannot be mapped according to alignment!
|
def up(self) -> "InstanceNode":
ts = max(self.timestamp, self.parinst.timestamp)
return self.parinst._copy(self._zip(), ts)
|
Return an instance node corresponding to the receiver's parent.
Raises:
NonexistentInstance: If there is no parent.
|
def Name(self, number):
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number))
|
Returns a string containing the name of an enum value.
|
def _dispatch(self, textgroup, directory):
self.dispatcher.dispatch(textgroup, path=directory)
|
The SPARQL dispatcher does not need to dispatch works, as the link is stored in the DB through the Textgroup
:param textgroup: A Textgroup object
:param directory: The path in which we found the textgroup
:return:
|
def display_string_dump(self, section_spec):
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
|
Display a strings dump of a section. section_spec is either a
section number or a name.
|
def _get_alm_disp_fc3(disp_dataset):
natom = disp_dataset['natom']
ndisp = len(disp_dataset['first_atoms'])
for disp1 in disp_dataset['first_atoms']:
ndisp += len(disp1['second_atoms'])
disp = np.zeros((ndisp, natom, 3), dtype='double', order='C')
indices = []
count = 0
for disp1 in disp_dataset['first_atoms']:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
count += 1
for disp1 in disp_dataset['first_atoms']:
for disp2 in disp1['second_atoms']:
if 'included' in disp2:
if disp2['included']:
indices.append(count)
else:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
disp[count, disp2['number']] = disp2['displacement']
count += 1
return disp, indices
|
Create displacements of atoms for ALM input
Note
----
Displacements of all atoms in supercells for all displacement
configurations in phono3py are returned, i.e., most of the
displacements are zero. Only the configurations with 'included' ==
True appear in the list of indices that is also returned.
Parameters
----------
disp_dataset : dict
Displacement dataset that may be obtained by
file_IO.parse_disp_fc3_yaml.
Returns
-------
disp : ndarray
Displacements of atoms in supercells of all displacement
configurations.
shape=(ndisp, natom, 3)
dtype='double'
indices : list of int
The indices of the displacement configurations with 'included' == True.
|
def setRegisterNumbersForTemporaries(ast, start):
seen = 0
signature = ''
aliases = []
for node in ast.postorderWalk():
if node.astType == 'alias':
aliases.append(node)
node = node.value
if node.reg.immediate:
node.reg.n = node.value
continue
reg = node.reg
if reg.n is None:
reg.n = start + seen
seen += 1
signature += reg.node.typecode()
for node in aliases:
node.reg = node.value.reg
return start + seen, signature
|
Assign register numbers for temporary registers, keeping track of
aliases and handling immediate operands.
|
def get_text_path(self):
for res in self.dsDoc['dataResources']:
resPath = res['resPath']
resType = res['resType']
isCollection = res['isCollection']
if resType == 'text' and isCollection:
return os.path.join(self.dsHome, resPath)
    raise RuntimeError('could not find learningData file in the dataset')
|
Returns the path of the directory containing text if they exist in this dataset.
|
def ssh_compute_remove(public_key, application_name, user=None):
if not (os.path.isfile(authorized_keys(application_name, user)) or
os.path.isfile(known_hosts(application_name, user))):
return
    keys = ssh_authorized_keys_lines(application_name, user=user)
keys = [k.strip() for k in keys]
if public_key not in keys:
return
    keys = [k for k in keys if k != public_key]
with open(authorized_keys(application_name, user), 'w') as _keys:
keys = '\n'.join(keys)
if not keys.endswith('\n'):
keys += '\n'
_keys.write(keys)
|
Remove given public key from authorized_keys file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the SSH assets are for.
:type user: str
|
def update_in_hdx(self):
capacity = self.data.get('capacity')
if capacity is not None:
del self.data['capacity']
self._update_in_hdx('user', 'id')
if capacity is not None:
self.data['capacity'] = capacity
|
Check if the user exists in HDX and, if so, update it
Returns:
None
|
def bounding_box_as_binary_map(alpha, threshold=0.1):
bb = bounding_box(alpha)
x = np.zeros(alpha.shape, dtype=np.bool_)
x[bb[0]:bb[2], bb[1]:bb[3]] = 1
return x
|
Similar to `bounding_box`, except returns the bounding box as a
binary map the same size as the input.
Same parameters as `bounding_box`.
Returns
-------
binary_map : ndarray, ndim=2, dtype=np.bool_
Binary map with True if object and False if background.
|
def get_cytoband_names():
return [
n.replace(".json.gz", "")
for n in pkg_resources.resource_listdir(__name__, _data_dir)
if n.endswith(".json.gz")
]
|
Returns the names of available cytoband data files
>>> get_cytoband_names()
['ucsc-hg38', 'ucsc-hg19']
|
def extract(cls, extractor, typ):
schema = {
"title": typ.__name__,
"type": "object",
"properties": {},
"required": []
}
for attribute in attr.fields(typ):
details = cls._extract_attribute(extractor, attribute)
if details.is_required:
schema["required"].append(details.name)
schema["properties"][details.name] = details.schema
return schema
|
Take an attrs-based class and convert it
to a JSON schema.
|
def clean_buckets(self, hash_name):
bucket_keys = self._iter_bucket_keys(hash_name)
self.redis_object.delete(*bucket_keys)
|
Removes all buckets and their content for specified hash.
|
def construct_xray_header(headers):
header_str = headers.get(http.XRAY_HEADER) or headers.get(http.ALT_XRAY_HEADER)
if header_str:
return TraceHeader.from_header_str(header_str)
else:
return TraceHeader()
|
Construct a ``TraceHeader`` object from dictionary headers
of the incoming request. This method should always return
a ``TraceHeader`` object regardless of tracing header's presence
in the incoming request.
|
def filter(self, record):
if isinstance(record.msg, basestring):
message = record.msg.lower()
if all(kw in message for kw in self.KEYWORDS):
record.levelname = 'DEBUG'
record.levelno = logging.DEBUG
return 1
|
Change the severity of selected log records.
|
def declares_namespace_package(filename):
import ast
with open(filename) as fp:
init_py = ast.parse(fp.read(), filename)
calls = [node for node in ast.walk(init_py) if isinstance(node, ast.Call)]
for call in calls:
if len(call.args) != 1:
continue
if isinstance(call.func, ast.Attribute) and call.func.attr != 'declare_namespace':
continue
if isinstance(call.func, ast.Name) and call.func.id != 'declare_namespace':
continue
if isinstance(call.args[0], ast.Name) and call.args[0].id == '__name__':
return True
return False
|
Given a filename, walk its ast and determine if it declares a namespace package.
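For instance, it would return True for an `__init__.py` containing the conventional pkg_resources declaration:
# __init__.py of a namespace package
import pkg_resources
pkg_resources.declare_namespace(__name__)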
|
def _validate_namespace(self, namespace):
if self._namespace_regex.fullmatch(namespace) is None:
LOGGER.debug('Invalid namespace: %s', namespace)
raise _ResponseFailed(self._status.INVALID_ADDRESS)
|
Validates a namespace, raising a ResponseFailed error if invalid.
Args:
namespace (str): The namespace to validate
Raises:
ResponseFailed: The namespace was invalid, and a status of
INVALID_ADDRESS will be sent with the response.
|
def do_IAmRequest(self, apdu):
if _debug: WhoIsIAmServices._debug("do_IAmRequest %r", apdu)
if apdu.iAmDeviceIdentifier is None:
raise MissingRequiredParameter("iAmDeviceIdentifier required")
if apdu.maxAPDULengthAccepted is None:
raise MissingRequiredParameter("maxAPDULengthAccepted required")
if apdu.segmentationSupported is None:
raise MissingRequiredParameter("segmentationSupported required")
if apdu.vendorID is None:
raise MissingRequiredParameter("vendorID required")
device_instance = apdu.iAmDeviceIdentifier[1]
if _debug: WhoIsIAmServices._debug(" - device_instance: %r", device_instance)
device_address = apdu.pduSource
if _debug: WhoIsIAmServices._debug(" - device_address: %r", device_address)
|
Respond to an I-Am request.
|
def DbGetDeviceFamilyList(self, argin):
self._log.debug("In DbGetDeviceFamilyList()")
argin = replace_wildcard(argin)
return self.db.get_device_family_list(argin)
|
Get a list of device name families for device names matching the
specified wildcard
:param argin: The wildcard
:type: tango.DevString
:return: Family list
:rtype: tango.DevVarStringArray
|
def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
logging.debug("Creating a elastic items generator.")
elastic_scroll_id = None
search_after = search_after_value
while True:
if scroll:
rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
else:
rjson = get_elastic_items_search(elastic, search_after, limit)
if rjson and "_scroll_id" in rjson:
elastic_scroll_id = rjson["_scroll_id"]
if rjson and "hits" in rjson:
if not rjson["hits"]["hits"]:
break
for hit in rjson["hits"]["hits"]:
item = hit['_source']
if 'sort' in hit:
search_after = hit['sort']
try:
backend._fix_item(item)
except Exception:
pass
yield item
else:
logging.error("No results found from %s", elastic.index_url)
break
return
|
Fetch the items from raw or enriched index
|
def create_auth_group(sender, instance, created, **kwargs):
if created:
AuthGroup.objects.create(group=instance)
|
Creates the AuthGroup model when a group is created
|
def cache_key(self):
return '%s:%s' % (super(EntryPublishedVectorBuilder, self).cache_key,
Site.objects.get_current().pk)
|
Key for the cache handling current site.
|
def setDatastreamState(self, pid, dsID, dsState):
http_args = {'dsState' : dsState}
url = 'objects/%(pid)s/datastreams/%(dsid)s' % {'pid': pid, 'dsid': dsID}
response = self.put(url, params=http_args)
return response.status_code == requests.codes.ok
|
Update datastream state.
:param pid: object pid
:param dsID: datastream id
:param dsState: datastream state
:returns: boolean success
|
def reverse_iterator(self, symbol, chunk_range=None):
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
c = CHUNKER_MAP[sym[CHUNKER]]
for chunk in list(self.get_chunk_ranges(symbol, chunk_range=chunk_range, reverse=True)):
yield self.read(symbol, chunk_range=c.to_range(chunk[0], chunk[1]))
|
Returns a generator that accesses each chunk in descending order
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
Returns
-------
generator
|
def _get_assignment_target_end(self, ast_module):
if len(ast_module.body) > 1:
raise ValueError("More than one expression or assignment.")
elif len(ast_module.body) > 0 and \
type(ast_module.body[0]) is ast.Assign:
if len(ast_module.body[0].targets) != 1:
raise ValueError("More than one assignment target.")
else:
return len(ast_module.body[0].targets[0].id)
return -1
|
Returns the position of the first char after the assignment target.
If there is no assignment, -1 is returned.
If there is more than one expression or assignment,
a ValueError is raised.
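A usage sketch (hypothetical call on an instance exposing this method):
import ast
mod = ast.parse("answer = compute()")
obj._get_assignment_target_end(mod)  # -> 6, i.e. len("answer")
obj._get_assignment_target_end(ast.parse("1 + 1"))  # -> -1, no assignment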
|
def my_main(context):
print('starting MyApp...')
if context['debug']:
print('Context:')
for k in context:
print('Key: {}\nValue: {}'.format(k, context[k]))
print('Done!')
return 0
|
The starting point for your app.
|
def get(self, id):
schema = PackageSchema()
resp = self.service.get_id(self.base, id)
return self.service.decode(schema, resp)
|
Get a package.
:param id: Package ID as an int.
:return: :class:`packages.Package <packages.Package>` object
:rtype: packages.Package
|
def deactivate_mfa_device(self, user_name, serial_number):
params = {'UserName' : user_name,
'SerialNumber' : serial_number}
return self.get_response('DeactivateMFADevice', params)
|
Deactivates the specified MFA device and removes it from
association with the user.
:type user_name: string
:param user_name: The username of the user
:type serial_number: string
:param serial_number: The serial number which uniquely identifies
the MFA device.
|
def modify_identity(self, identity, **kwargs):
if isinstance(identity, zobjects.Identity):
self.request('ModifyIdentity', {'identity': identity._full_data})
return self.get_identities(identity=identity.name)[0]
else:
attrs = []
for attr, value in kwargs.items():
attrs.append({
'name': attr,
'_content': value
})
self.request('ModifyIdentity', {
'identity': {
'name': identity,
'a': attrs
}
})
return self.get_identities(identity=identity)[0]
|
Modify some attributes of an identity or its name.
:param identity: a zobjects.Identity with `id` set (mandatory). Also
set the items you want to modify/set and/or the `name` attribute to
rename the identity.
Can also take the name as a string, followed by the attributes to modify
:returns: zobjects.Identity object
|
def find_link(self, device):
for i in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[i]
if (str(i) == device or
conn.address == device or
getattr(conn, 'label', None) == device):
return i
return None
|
find a device based on number, name or label
|
def move_transition_point(self, fragment_index, value):
self.log(u"Called move_transition_point with")
self.log([u" fragment_index %d", fragment_index])
self.log([u" value %.3f", value])
if (fragment_index < 0) or (fragment_index > (len(self) - 3)):
self.log(u"Bad fragment_index, returning")
return
current_interval = self[fragment_index].interval
next_interval = self[fragment_index + 1].interval
if value > next_interval.end:
self.log(u"Bad value, returning")
return
if not current_interval.is_non_zero_before_non_zero(next_interval):
self.log(u"Bad interval configuration, returning")
return
current_interval.end = value
next_interval.begin = value
self.log(u"Moved transition point")
|
Change the transition point between fragment ``fragment_index``
and the next fragment to the time value ``value``.
This method fails silently
(without changing the fragment list)
if at least one of the following conditions holds:
* ``fragment_index`` is negative
* ``fragment_index`` is the last or the second-to-last
* ``value`` is after the current end of the next fragment
* the current fragment and the next one are not adjacent intervals, each of non-zero length
The above conditions ensure that the move makes sense
and that it keeps the list satisfying the constraints.
:param int fragment_index: the fragment index whose end should be moved
:param value: the new transition point
:type value: :class:`~aeneas.exacttiming.TimeValue`
|
def assert_allowed(request, level, pid):
if not d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid).exists():
raise d1_common.types.exceptions.NotFound(
0,
'Attempted to perform operation on non-existing object. pid="{}"'.format(
pid
),
)
if not is_allowed(request, level, pid):
raise d1_common.types.exceptions.NotAuthorized(
0,
'Operation is denied. level="{}", pid="{}", active_subjects="{}"'.format(
level_to_action(level), pid, format_active_subjects(request)
),
)
|
Assert that one or more subjects are allowed to perform action on object.
Raise NotAuthorized if object exists and subject is not allowed. Raise NotFound if
object does not exist.
|
def rename(self, name):
if name is None:
raise Exception("name (%s) not-valid" % (name,))
self.prefix, self.name = splitPrefix(name)
|
Rename the element.
@param name: A new name for the element.
@type name: basestring
|
def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):
from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cli_ctx)
if subscription:
cred, _, _ = profile.get_login_credentials(subscription_id=subscription)
return ApplicationInsightsManagementClient(
cred,
subscription
)
cred, sub_id, _ = profile.get_login_credentials()
return ApplicationInsightsManagementClient(
cred,
sub_id
)
|
Initialize Application Insights mgmt client for use with CLI.
|
def get_comments_of_incoming_per_page(self, incoming_id, per_page=1000, page=1):
return self._get_resource_per_page(
resource=INCOMING_COMMENTS,
per_page=per_page,
page=page,
params={'incoming_id': incoming_id},
)
|
Get comments of incoming per page
:param incoming_id: the incoming id
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
|
def which(exe):
def wrapper(function):
def wrapped(*args, **kwargs):
if salt.utils.path.which(exe) is None:
raise CommandNotFoundError(
'The \'{0}\' binary was not found in $PATH.'.format(exe)
)
return function(*args, **kwargs)
return identical_signature_wrapper(function, wrapped)
return wrapper
|
Decorator wrapper for salt.utils.path.which
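A hypothetical usage sketch, guarding an execution-module function on a binary being present:
@which('rsync')
def sync(src, dst):
    # Raises CommandNotFoundError at call time if 'rsync' is not in $PATH.
    return __salt__['cmd.run'](['rsync', '-a', src, dst])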
|
def register_lists(self, category_lists, lists_init_kwargs=None, editor_init_kwargs=None):
lists_init_kwargs = lists_init_kwargs or {}
editor_init_kwargs = editor_init_kwargs or {}
for lst in category_lists:
if isinstance(lst, string_types):
lst = self.list_cls(lst, **lists_init_kwargs)
elif not isinstance(lst, CategoryList):
raise SitecatsConfigurationError(
'`CategoryRequestHandler.register_lists()` accepts only '
'`CategoryList` objects or category aliases.'
)
if self._obj:
lst.set_obj(self._obj)
for name, val in lists_init_kwargs.items():
setattr(lst, name, val)
lst.enable_editor(**editor_init_kwargs)
self._lists[lst.get_id()] = lst
|
Registers CategoryList objects to handle their requests.
:param list category_lists: CategoryList objects
:param dict lists_init_kwargs: Attributes to apply to each of the CategoryList objects
:param dict editor_init_kwargs: Keyword arguments passed to each list's `enable_editor()`
|
def get_command_class(self, cmd):
try:
cmdpath = self.registry[cmd]
except KeyError:
raise CommandError("No such command %r" % cmd)
if isinstance(cmdpath, basestring):
Command = import_class(cmdpath)
else:
Command = cmdpath
return Command
|
Returns command class from the registry for a given ``cmd``.
:param cmd: command to run (key at the registry)
|
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex:
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from)
|
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
|
def view_on_site(self, request, content_type_id, object_id):
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise Http404(_("Content type %(ct_id)s object has no associated model") % {
'ct_id': content_type_id,
})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") % {
'ct_id': content_type_id,
'obj_id': object_id,
})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") % {
'ct_name': content_type.name,
})
absurl = get_absolute_url()
return HttpResponseRedirect(absurl)
|
Redirect to an object's page based on a content-type ID and an object ID.
|
def authorizer(self, schemes, resource, action, request_args):
if not schemes:
return u'', u''
for scheme in schemes:
if scheme in self.schemes and self.has_auth_params(scheme):
cred = Context.format_auth_params(self.schemes[scheme][u'params'])
if hasattr(self, 'mfa_token'):
cred = '{}, mfa_token="{}"'.format(cred, self.mfa_token)
return scheme, cred
raise AuthenticationError(self, schemes)
|
Construct the Authorization header for a request.
Args:
schemes (list of str): Authentication schemes supported for the
requested action.
resource (str): Object upon which an action is being performed.
action (str): Action being performed.
request_args (list of str): Arguments passed to the action call.
Returns:
(str, str) A tuple of the auth scheme satisfied, and the credential
for the Authorization header or empty strings if none could be
satisfied.
|
def check_valid(self, get_params):
if self.commands._if:
return self.commands._if.check_valid(get_params)
|
See if the `if` condition for a block is valid
|
def close_async(self):
if self._stream is None or self._stream.closed():
self._stream = None
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
yield self._stream.write(send_data)
self.close()
|
Send the quit message and close the socket
|
def create(self, parties):
assert parties > 0, "parties must be a positive integer."
return self.backend.add(self.key, parties, self.ttl)
|
Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created.
|
def run(self):
_, test_data = self.data.load(train=False, test=True)
try:
self.model.fit_generator(
self.samples_to_batches(self.generate_samples(), self.args.batch_size), steps_per_epoch=self.args.steps_per_epoch,
epochs=self.epoch + self.args.epochs, validation_data=test_data,
callbacks=self.callbacks, initial_epoch=self.epoch
)
finally:
self.model.save(self.args.model)
save_params(self.args.model)
|
Train the model on randomly generated batches
|
def get_indicator(self, resource):
path = resource.real_path
if os.name != 'posix' and os.path.isdir(path):
return (os.path.getmtime(path),
len(os.listdir(path)),
os.path.getsize(path))
return (os.path.getmtime(path),
os.path.getsize(path))
|
Return the modification time and size of a `Resource`.
|
def assembly_plus_protons(input_file, path=True, pdb_name=None,
save_output=False, force_save=False):
from ampal.pdb_parser import convert_pdb_to_ampal
if path:
input_path = Path(input_file)
if not pdb_name:
pdb_name = input_path.stem[:4]
reduced_path = reduce_output_path(path=input_path)
if reduced_path.exists() and not save_output and not force_save:
reduced_assembly = convert_pdb_to_ampal(
str(reduced_path), pdb_id=pdb_name)
return reduced_assembly
if save_output:
reduced_path = output_reduce(
input_file, path=path, pdb_name=pdb_name, force=force_save)
reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
else:
reduce_mmol, reduce_message = run_reduce(input_file, path=path)
if not reduce_mmol:
return None
reduced_assembly = convert_pdb_to_ampal(
reduce_mmol, path=False, pdb_id=pdb_name)
return reduced_assembly
|
Returns an Assembly with protons added by Reduce.
Notes
-----
Looks for a pre-existing Reduce output in the standard location before
running Reduce. If the protein contains oligosaccharides or glycans,
use reduce_correct_carbohydrates.
Parameters
----------
input_file : str or pathlib.Path
Location of file to be converted to Assembly or PDB file as string.
path : bool
Whether we are looking at a file or a pdb string. Defaults to file.
pdb_name : str
PDB ID of protein. Required if providing string not path.
save_output : bool
If True will save the generated assembly.
force_save : bool
If True will overwrite existing reduced assembly.
Returns
-------
reduced_assembly : AMPAL Assembly
Assembly of protein with protons added by Reduce.
|
def list(self, cur_p=''):
current_page_number = int(cur_p) if cur_p else 1
current_page_number = 1 if current_page_number < 1 else current_page_number
kwd = {
'current_page': current_page_number
}
recs = MEntity.get_all_pager(current_page_num=current_page_number)
self.render('misc/entity/entity_list.html',
imgs=recs,
cfg=config.CMS_CFG,
kwd=kwd,
userinfo=self.userinfo)
|
Lists the entities.
|
def _neighbors_graph(self, **params) -> Dict:
response = self._get_response("graph/neighbors", format="json", **params)
return response.json()
|
Get neighbors of a node
parameters are directly passed through to SciGraph: e.g. depth, relationshipType
|
def reload_accelerators(self, *args):
if self.accel_group:
self.guake.window.remove_accel_group(self.accel_group)
self.accel_group = Gtk.AccelGroup()
self.guake.window.add_accel_group(self.accel_group)
self.load_accelerators()
|
Reassign an accel_group to guake main window and guake
context menu and calls the load_accelerators method.
|
def walk_dir(path, args, state):
if args.debug:
sys.stderr.write("Walking %s\n" % path)
for root, _dirs, files in os.walk(path):
if not safe_process_files(root, files, args, state):
return False
if state.should_quit():
return False
return True
|
Check all files in `path` to see if there are any requests that
we should send out on the bus.
|
def concentric_hexagons(radius, start=(0, 0)):
x, y = start
yield (x, y)
for r in range(1, radius + 1):
y -= 1
for dx, dy in [(1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0)]:
for _ in range(r):
yield (x, y)
x += dx
y += dy
|
A generator which produces coordinates of concentric rings of hexagons.
Parameters
----------
radius : int
Number of layers to produce (0 is just one hexagon)
start : (x, y)
The coordinate of the central hexagon.
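For example, radius 1 yields the central hexagon followed by its six neighbours:
>>> list(concentric_hexagons(1))
[(0, 0), (0, -1), (1, 0), (1, 1), (0, 1), (-1, 0), (-1, -1)]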
|
def get(request):
res = Result()
obj, created = UserPref.objects.get_or_create(user=request.user, defaults={'data': json.dumps(DefaultPrefs.copy())})
data = obj.json()
data['subscriptions'] = [_.json() for _ in GallerySubscription.objects.filter(user=request.user)]
res.append(data)
return JsonResponse(res.asDict())
|
Gets the currently logged-in user's preferences
:returns: json
|
def init_all_objects(self, data, target=None, single_result=True):
if single_result:
return self.init_target_object(target, data)
return list(self.expand_models(target, data))
|
Initializes model instances from given data.
Returns single instance if single_result=True.
|
def _escape(self, value):
if isinstance(value, SafeString):
return value
return shellescape.quote(value)
|
Escape given value unless it is safe.
|
def clone(name, new_name, linked=False, template=False, runas=None):
args = [salt.utils.data.decode(name), '--name', salt.utils.data.decode(new_name)]
if linked:
args.append('--linked')
if template:
args.append('--template')
return prlctl('clone', args, runas=runas)
|
Clone a VM
.. versionadded:: 2016.11.0
:param str name:
Name/ID of VM to clone
:param str new_name:
Name of the new VM
:param bool linked:
Create a linked virtual machine.
:param bool template:
Create a virtual machine template instead of a real virtual machine.
:param str runas:
The user that the prlctl command will be run as
Example:
.. code-block:: bash
salt '*' parallels.clone macvm macvm_new runas=macdev
salt '*' parallels.clone macvm macvm_templ template=True runas=macdev
|
def _ExtractYahooSearchQuery(self, url):
if 'p=' not in url:
return None
_, _, line = url.partition('p=')
before_and, _, _ = line.partition('&')
if not before_and:
return None
yahoo_search_url = before_and.split()[0]
return yahoo_search_url.replace('+', ' ')
|
Extracts a search query from a Yahoo search URL.
Examples:
https://search.yahoo.com/search?p=query
https://search.yahoo.com/search;?p=query
Args:
url (str): URL.
Returns:
str: search query or None if no query was found.
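The partition logic in isolation (a standalone sketch):
url = 'https://search.yahoo.com/search?p=grilled+cheese&fr=yfp'
_, _, line = url.partition('p=')
before_and, _, _ = line.partition('&')
print(before_and.split()[0].replace('+', ' '))  # grilled cheese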
|
def append(self, data):
for k in self._entries.keys():
self._entries[k].append(data._entries[k])
|
Append a Data instance to self
|
def blocking_start(self, waiting_func=None):
self.logger.debug('threadless start')
try:
for job_params in self._get_iterator():
self.config.logger.debug('received %r', job_params)
self.quit_check()
if job_params is None:
if self.config.quit_on_empty_queue:
raise KeyboardInterrupt
self.logger.info("there is nothing to do. Sleeping "
"for %d seconds" %
self.config.idle_delay)
self._responsive_sleep(self.config.idle_delay)
continue
self.quit_check()
try:
args, kwargs = job_params
except ValueError:
args = job_params
kwargs = {}
try:
self.task_func(*args, **kwargs)
except Exception:
self.config.logger.error("Error in processing a job",
exc_info=True)
except KeyboardInterrupt:
self.logger.debug('queuingThread gets quit request')
finally:
self.quit = True
self.logger.debug("ThreadlessTaskManager dies quietly")
|
This function starts the task manager running its tasks. The
waiting_func is normally used to do something while other threads
are running, but here we don't have other threads, so the waiting
func will never get called. There is a case for calling it at
least once after the end of the task loop.
|
def encrypt(self, mesg):
seqn = next(self._tx_sn)
rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))
return rv
|
Wrap a message with a sequence number and encrypt it.
Args:
mesg: The mesg to encrypt.
Returns:
bytes: The encrypted message.
|
def get_func_task_path(func):
module_path = inspect.getmodule(func).__name__
task_path = '{module_path}.{func_name}'.format(
module_path=module_path,
func_name=func.__name__
)
return task_path
|
Format the modular task path for a function via inspection.
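For example, applied to a standard-library function:
>>> import json
>>> get_func_task_path(json.dumps)
'json.dumps'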
|
def should_see_in_seconds(self, text, timeout):
def check_element():
assert contains_content(world.browser, text), \
"Expected element with the given text."
wait_for(check_element)(timeout=int(timeout))
|
Assert provided text is visible within n seconds.
Be aware this text could be anywhere on the screen. Also be aware that
it might cross several HTML nodes. No determination is made between
block and inline nodes. Whitespace can be affected.
|
def backtrack(self, decision_level):
self._backtracking = True
packages = set()
while self._assignments[-1].decision_level > decision_level:
removed = self._assignments.pop(-1)
packages.add(removed.dependency.name)
if removed.is_decision():
del self._decisions[removed.dependency.name]
for package in packages:
if package in self._positive:
del self._positive[package]
if package in self._negative:
del self._negative[package]
for assignment in self._assignments:
if assignment.dependency.name in packages:
self._register(assignment)
|
Resets the current decision level to decision_level, and removes all
assignments made after that level.
|
def make_driver(loop=None):
loop = loop or asyncio.get_event_loop()
def stop(i = None):
loop.stop()
def driver(sink):
sink.control.subscribe(
on_next=stop,
on_error=stop,
on_completed=stop)
return None
return Component(call=driver, input=Sink)
|
Returns a stop driver.
The optional loop argument can be provided to use the driver in another
loop than the default one.
Parameters
-----------
loop: BaseEventLoop
The event loop to use instead of the default one.
|
def processCommit(self, commit: Commit, sender: str) -> None:
self.logger.debug("{} received COMMIT{} from {}".format(
self, (commit.viewNo, commit.ppSeqNo), sender))
if self.validateCommit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self.addToCommits(commit, sender)
self.logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo)))
|
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
|