id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
/Bugs%20Everywhere%20(BEurtle%20fork)-1.5.0.1.-2012-07-16-.zip/Bugs Everywhere (BEurtle fork)-1.5.0.1.-2012-07-16-/libbe/storage/util/upgrade.py | import codecs
import os, os.path
import sys
import libbe
import libbe.bug
import libbe.storage.util.mapfile as mapfile
from libbe.storage import STORAGE_VERSIONS, STORAGE_VERSION
#import libbe.storage.vcs # delay import to avoid cyclic dependency
import libbe.ui.util.editor
import libbe.util
import libbe.util.encoding as encoding
import libbe.util.id
class Upgrader (object):
"Class for converting between different on-disk BE storage formats."
initial_version = None
final_version = None
def __init__(self, repo):
import libbe.storage.vcs
self.repo = repo
vcs_name = self._get_vcs_name()
if vcs_name is None:
vcs_name = 'None'
self.vcs = libbe.storage.vcs.vcs_by_name(vcs_name)
self.vcs.repo = self.repo
self.vcs.root()
def get_path(self, *args):
"""
Return the absolute path using args relative to .be.
"""
dir = os.path.join(self.repo, '.be')
if len(args) == 0:
return dir
return os.path.join(dir, *args)
def _get_vcs_name(self):
return None
def check_initial_version(self):
path = self.get_path('version')
version = encoding.get_file_contents(path, decode=True).rstrip('\n')
assert version == self.initial_version, '%s: %s' % (path, version)
def set_version(self):
path = self.get_path('version')
encoding.set_file_contents(path, self.final_version+'\n')
self.vcs._vcs_update(path)
def upgrade(self):
print >> sys.stderr, 'upgrading bugdir from "%s" to "%s"' \
% (self.initial_version, self.final_version)
self.check_initial_version()
self.set_version()
self._upgrade()
def _upgrade(self):
raise NotImplementedError
class Upgrade_1_0_to_1_1 (Upgrader):
initial_version = "Bugs Everywhere Tree 1 0"
final_version = "Bugs Everywhere Directory v1.1"
def _get_vcs_name(self):
path = self.get_path('settings')
settings = encoding.get_file_contents(path)
for line in settings.splitlines(False):
fields = line.split('=')
if len(fields) == 2 and fields[0] == 'rcs_name':
return fields[1]
return None
def _upgrade_mapfile(self, path):
contents = encoding.get_file_contents(path, decode=True)
old_format = False
for line in contents.splitlines():
if len(line.split('=')) == 2:
old_format = True
break
if old_format:
# translate to YAML.
newlines = []
for line in contents.splitlines():
line = line.rstrip('\n')
if len(line) == 0:
continue
fields = line.split("=")
if len(fields) == 2:
key,value = fields
newlines.append('%s: "%s"' % (key, value.replace('"','\\"')))
else:
newlines.append(line)
contents = '\n'.join(newlines)
# load the YAML and save
map = mapfile.parse(contents)
contents = mapfile.generate(map)
encoding.set_file_contents(path, contents)
self.vcs._vcs_update(path)
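# Illustrative example (hypothetical key/value, grounded in the code above): a
# line in the old homegrown mapfile format such as
#   severity=minor
# is rewritten as the YAML-style line
#   severity: "minor"
# before being round-tripped through mapfile.parse / mapfile.generate.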
def _upgrade(self):
"""
Comment value field "From" -> "Author".
Homegrown mapfile -> YAML.
"""
path = self.get_path('settings')
self._upgrade_mapfile(path)
for bug_uuid in os.listdir(self.get_path('bugs')):
path = self.get_path('bugs', bug_uuid, 'values')
self._upgrade_mapfile(path)
c_path = ['bugs', bug_uuid, 'comments']
if not os.path.exists(self.get_path(*c_path)):
continue # no comments for this bug
for comment_uuid in os.listdir(self.get_path(*c_path)):
path_list = c_path + [comment_uuid, 'values']
path = self.get_path(*path_list)
self._upgrade_mapfile(path)
settings = mapfile.parse(
encoding.get_file_contents(path))
if 'From' in settings:
settings['Author'] = settings.pop('From')
encoding.set_file_contents(
path, mapfile.generate(settings))
self.vcs._vcs_update(path)
class Upgrade_1_1_to_1_2 (Upgrader):
initial_version = "Bugs Everywhere Directory v1.1"
final_version = "Bugs Everywhere Directory v1.2"
def _get_vcs_name(self):
path = self.get_path('settings')
settings = mapfile.parse(encoding.get_file_contents(path))
if 'rcs_name' in settings:
return settings['rcs_name']
return None
def _upgrade(self):
"""
BugDir settings field "rcs_name" -> "vcs_name".
"""
path = self.get_path('settings')
settings = mapfile.parse(encoding.get_file_contents(path))
if 'rcs_name' in settings:
settings['vcs_name'] = settings.pop('rcs_name')
encoding.set_file_contents(path, mapfile.generate(settings))
self.vcs._vcs_update(path)
class Upgrade_1_2_to_1_3 (Upgrader):
initial_version = "Bugs Everywhere Directory v1.2"
final_version = "Bugs Everywhere Directory v1.3"
def __init__(self, *args, **kwargs):
Upgrader.__init__(self, *args, **kwargs)
self._targets = {} # key: target text,value: new target bug
def _get_vcs_name(self):
path = self.get_path('settings')
settings = mapfile.parse(encoding.get_file_contents(path))
if 'vcs_name' in settings:
return settings['vcs_name']
return None
def _save_bug_settings(self, bug):
# The target bugs don't have comments
path = self.get_path('bugs', bug.uuid, 'values')
if not os.path.exists(path):
self.vcs._add_path(path, directory=False)
path = self.get_path('bugs', bug.uuid, 'values')
mf = mapfile.generate(bug._get_saved_settings())
encoding.set_file_contents(path, mf)
self.vcs._vcs_update(path)
def _target_bug(self, target_text):
if target_text not in self._targets:
bug = libbe.bug.Bug(summary=target_text)
bug.severity = 'target'
self._targets[target_text] = bug
return self._targets[target_text]
def _upgrade_bugdir_mapfile(self):
path = self.get_path('settings')
mf = encoding.get_file_contents(path)
if mf == libbe.util.InvalidObject:
return # settings file does not exist
settings = mapfile.parse(mf)
if 'target' in settings:
settings['target'] = self._target_bug(settings['target']).uuid
mf = mapfile.generate(settings)
encoding.set_file_contents(path, mf)
self.vcs._vcs_update(path)
def _upgrade_bug_mapfile(self, bug_uuid):
import libbe.command.depend as dep
path = self.get_path('bugs', bug_uuid, 'values')
mf = encoding.get_file_contents(path)
if mf == libbe.util.InvalidObject:
return # settings file does not exist
settings = mapfile.parse(mf)
if 'target' in settings:
target_bug = self._target_bug(settings['target'])
blocked_by_string = '%s%s' % (dep.BLOCKED_BY_TAG, bug_uuid)
dep._add_remove_extra_string(target_bug, blocked_by_string, add=True)
blocks_string = dep._generate_blocks_string(target_bug)
estrs = settings.get('extra_strings', [])
estrs.append(blocks_string)
settings['extra_strings'] = sorted(estrs)
settings.pop('target')
mf = mapfile.generate(settings)
encoding.set_file_contents(path, mf)
self.vcs._vcs_update(path)
def _upgrade(self):
"""
Bug value field "target" -> target bugs.
Bugdir value field "target" -> pointer to current target bug.
"""
for bug_uuid in os.listdir(self.get_path('bugs')):
self._upgrade_bug_mapfile(bug_uuid)
self._upgrade_bugdir_mapfile()
for bug in self._targets.values():
self._save_bug_settings(bug)
class Upgrade_1_3_to_1_4 (Upgrader):
initial_version = "Bugs Everywhere Directory v1.3"
final_version = "Bugs Everywhere Directory v1.4"
def _get_vcs_name(self):
path = self.get_path('settings')
settings = mapfile.parse(encoding.get_file_contents(path))
if 'vcs_name' in settings:
return settings['vcs_name']
return None
def _upgrade(self):
"""
add new directory "./be/BUGDIR-UUID"
"./be/bugs" -> "./be/BUGDIR-UUID/bugs"
"./be/settings" -> "./be/BUGDIR-UUID/settings"
"""
self.repo = os.path.abspath(self.repo)
basenames = [p for p in os.listdir(self.get_path())]
if not 'bugs' in basenames and not 'settings' in basenames \
and len([p for p in basenames if len(p)==36]) == 1:
return # the user has upgraded the directory.
basenames = [p for p in basenames if p in ['bugs','settings']]
uuid = libbe.util.id.uuid_gen()
add = [self.get_path(uuid)]
move = [(self.get_path(p), self.get_path(uuid, p)) for p in basenames]
msg = ['Upgrading BE directory version v1.3 to v1.4',
'',
"Because BE's VCS drivers don't support 'move',",
'please make the following changes with your VCS',
'and re-run BE. Note that you can choose a different',
'bugdir UUID to preserve uniformity across branches',
'of a distributed repository.',
'',
'add',
' ' + '\n '.join(add),
'move',
' ' + '\n '.join(['%s %s' % (a,b) for a,b in move]),
]
self.vcs._cached_path_id.destroy()
raise Exception('Need user assistance\n%s' % '\n'.join(msg))
upgraders = [Upgrade_1_0_to_1_1,
Upgrade_1_1_to_1_2,
Upgrade_1_2_to_1_3,
Upgrade_1_3_to_1_4]
upgrade_classes = {}
for upgrader in upgraders:
upgrade_classes[(upgrader.initial_version,upgrader.final_version)]=upgrader
def upgrade(path, current_version,
target_version=STORAGE_VERSION):
"""
Call the appropriate upgrade function to convert current_version
to target_version. If a direct conversion function does not exist,
use consecutive conversion functions.
"""
if current_version not in STORAGE_VERSIONS:
raise NotImplementedError, \
"Cannot handle version '%s' yet." % current_version
if target_version not in STORAGE_VERSIONS:
raise NotImplementedError, \
"Cannot handle version '%s' yet." % current_version
if (current_version, target_version) in upgrade_classes:
# direct conversion
upgrade_class = upgrade_classes[(current_version, target_version)]
u = upgrade_class(path)
u.upgrade()
else:
# consecutive single-step conversion
i = STORAGE_VERSIONS.index(current_version)
while True:
version_a = STORAGE_VERSIONS[i]
version_b = STORAGE_VERSIONS[i+1]
try:
upgrade_class = upgrade_classes[(version_a, version_b)]
except KeyError:
raise NotImplementedError, \
"Cannot convert version '%s' to '%s' yet." \
% (version_a, version_b)
u = upgrade_class(path)
u.upgrade()
if version_b == target_version:
break
i += 1 | PypiClean |
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/AADeepLearning/datasets/generic_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import numpy as np
import time
import sys
import six
import marshal
import types as python_types
import inspect
import codecs
import collections
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
# Arguments
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
# Returns
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
# Example
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
# Returns
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_aadeeplearning_object(instance):
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
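# Illustrative example (hypothetical class name): for an object exposing
# get_config(), say a `MyLayer` instance, serialize_aadeeplearning_object
# returns {'class_name': 'MyLayer', 'config': instance.get_config()}, which
# deserialize_aadeeplearning_object below reverses when 'MyLayer' is resolvable
# through module_objects or custom_objects.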
def deserialize_aadeeplearning_object(identifier, module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a AADeepLearning config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name +
': ' + class_name)
if hasattr(cls, 'from_config'):
custom_objects = custom_objects or {}
if has_arg(cls.from_config, 'custom_objects'):
return cls.from_config(
config['config'],
custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name +
':' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' +
printable_module_name + ': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
# Arguments
func: the function to serialize.
# Returns
A tuple `(code, defaults, closure)`.
"""
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
# Arguments
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
# Returns
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
# Arguments
value: Any value that needs to be cast to the cell type
# Returns
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
else:
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
code = marshal.loads(raw_code)
except (UnicodeEncodeError, binascii.Error, ValueError):
# backwards compatibility for models serialized prior to 2.1.2
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs,
name=code.co_name,
argdefs=defaults,
closure=closure)
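# Hypothetical usage sketch: a minimal func_dump/func_load round trip, assuming
# dump and load run in the same interpreter version (marshal bytecode is not
# portable across versions). `_example_func_dump_roundtrip` and `_demo_fn` are
# made-up names, not part of the module.
def _example_func_dump_roundtrip():
    def _demo_fn(x, k=2):
        return x * k
    code, defaults, closure = func_dump(_demo_fn)
    restored = func_load(code, defaults=defaults, closure=closure,
                         globs=globals())
    return restored(5) == _demo_fn(5)  # expected: True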
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
For Python 2, checks if there is an argument with the given name.
For Python 3, checks if there is an argument with the given name, and
also whether this argument can be called with a keyword (i.e. if it is
not a positional-only argument).
# Arguments
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
# Returns
bool, whether `fn` accepts a `name` keyword argument.
"""
if sys.version_info < (3,):
arg_spec = inspect.getargspec(fn)
if accept_all and arg_spec.keywords is not None:
return True
return (name in arg_spec.args)
elif sys.version_info < (3, 3):
arg_spec = inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return (name in arg_spec.args or
name in arg_spec.kwonlyargs)
else:
signature = inspect.signature(fn)
parameter = signature.parameters.get(name)
if parameter is None:
if accept_all:
for param in signature.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD:
return True
return False
return (parameter.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY))
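# Hypothetical usage sketch: what has_arg reports for a made-up callable
# `_demo_call` with a named keyword and a catch-all **kwargs.
def _example_has_arg():
    def _demo_call(x, verbose=0, **kwargs):
        return x
    return (has_arg(_demo_call, 'verbose'),                    # True
            has_arg(_demo_call, 'momentum'),                   # False
            has_arg(_demo_call, 'momentum', accept_all=True))  # True via **kwargs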
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
self._values = collections.OrderedDict()
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = ('%d:%02d:%02d' %
(eta // 3600, (eta % 3600) // 60, eta % 60))
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values:
info += ' - %s:' % k
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
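# Hypothetical usage sketch (made-up metric name 'loss'): drive a Progbar over
# `num_steps` updates; the running average of 'loss' is printed with the bar.
def _example_progbar(num_steps=100):
    bar = Progbar(target=num_steps, width=30, verbose=1)
    for step in range(num_steps):
        bar.update(step + 1, values=[('loss', 1.0 / (step + 1))])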
def to_list(x, allow_tuple=False):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
# Arguments
x: target object to be normalized.
allow_tuple: If False and x is a tuple,
it will be converted into a list
with a single element (the tuple).
Else converts the tuple to a list.
# Returns
A list.
"""
if isinstance(x, list):
return x
if allow_tuple and isinstance(x, tuple):
return list(x)
return [x]
def unpack_singleton(x):
"""Gets the first element if the iterable has only one value.
Otherwise return the iterable.
# Argument:
x: A list or tuple.
# Returns:
The same iterable or the first element.
"""
if len(x) == 1:
return x[0]
return x
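# Hypothetical usage sketch: how to_list and unpack_singleton normalize their
# inputs (the literal values are arbitrary examples).
def _example_list_helpers():
    assert to_list([1, 2]) == [1, 2]
    assert to_list((1, 2)) == [(1, 2)]                   # tuple kept as one element
    assert to_list((1, 2), allow_tuple=True) == [1, 2]   # tuple unpacked
    assert unpack_singleton([7]) == 7
    assert unpack_singleton([7, 8]) == [7, 8]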
def object_list_uid(object_list):
object_list = to_list(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def is_all_none(iterable_or_element):
iterable = to_list(iterable_or_element, allow_tuple=True)
for element in iterable:
if element is not None:
return False
return True
def slice_arrays(arrays, start=None, stop=None):
"""Slices an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
# Arguments
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
# Returns
A slice of the array(s).
"""
if arrays is None:
return [None]
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
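# Hypothetical usage sketch: slicing a list of arrays by range and by an index
# list (the arrays are arbitrary examples).
def _example_slice_arrays():
    a = np.arange(10)
    b = np.arange(10) * 2
    first_three = slice_arrays([a, b], 0, 3)   # [a[0:3], b[0:3]]
    picked = slice_arrays([a, b], [1, 4, 7])   # index-list selection per array
    return first_three, picked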
def transpose_shape(shape, target_format, spatial_axes):
"""Converts a tuple or a list to the correct `data_format`.
It does so by switching the positions of its elements.
# Arguments
shape: Tuple or list, often representing shape,
corresponding to `'channels_last'`.
target_format: A string, either `'channels_first'` or `'channels_last'`.
spatial_axes: A tuple of integers.
Correspond to the indexes of the spatial axes.
For example, if you pass a shape
representing (batch_size, timesteps, rows, cols, channels),
then `spatial_axes=(2, 3)`.
# Returns
A tuple or list, with the elements permuted according
to `target_format`.
# Example
```python
>>> from aadeeplearning.utils.generic_utils import transpose_shape
>>> transpose_shape((16, 128, 128, 32),'channels_first', spatial_axes=(1, 2))
(16, 32, 128, 128)
>>> transpose_shape((16, 128, 128, 32), 'channels_last', spatial_axes=(1, 2))
(16, 128, 128, 32)
>>> transpose_shape((128, 128, 32), 'channels_first', spatial_axes=(0, 1))
(32, 128, 128)
```
# Raises
ValueError: if `value` or the global `data_format` invalid.
"""
if target_format == 'channels_first':
new_values = shape[:spatial_axes[0]]
new_values += (shape[-1],)
new_values += tuple(shape[x] for x in spatial_axes)
if isinstance(shape, list):
return list(new_values)
return new_values
elif target_format == 'channels_last':
return shape
else:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(target_format)) | PypiClean |
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/packages/google/protobuf/any_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/any.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42o\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z%github.com/golang/protobuf/ptypes/any\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_ANY = _descriptor.Descriptor(
name='Any',
full_name='google.protobuf.Any',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Any.type_url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Any.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=84,
)
DESCRIPTOR.message_types_by_name['Any'] = _ANY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Any = _reflection.GeneratedProtocolMessageType('Any', (_message.Message,), dict(
DESCRIPTOR = _ANY,
__module__ = 'google.protobuf.any_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Any)
))
_sym_db.RegisterMessage(Any)
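# Illustrative usage (assumes the protobuf runtime's well-known-type helpers):
#   any_msg = Any()
#   any_msg.Pack(inner_msg)      # inner_msg: any other protobuf Message
#   any_msg.Unpack(target_msg)   # True when the stored type matches target_msg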
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\010AnyProtoP\001Z%github.com/golang/protobuf/ptypes/any\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope) | PypiClean |
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/mode/tiki/tiki.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode('tiki', function(config) {
function inBlock(style, terminator, returnTokenizer) {
return function(stream, state) {
while (!stream.eol()) {
if (stream.match(terminator)) {
state.tokenize = inText;
break;
}
stream.next();
}
if (returnTokenizer) state.tokenize = returnTokenizer;
return style;
};
}
function inLine(style) {
return function(stream, state) {
while(!stream.eol()) {
stream.next();
}
state.tokenize = inText;
return style;
};
}
function inText(stream, state) {
function chain(parser) {
state.tokenize = parser;
return parser(stream, state);
}
var sol = stream.sol();
var ch = stream.next();
//non start of line
switch (ch) { //switch is generally much faster than if, so it is used here
case "{": //plugin
stream.eat("/");
stream.eatSpace();
var tagName = "";
var c;
while ((c = stream.eat(/[^\s\u00a0=\"\'\/?(}]/))) tagName += c;
state.tokenize = inPlugin;
return "tag";
break;
case "_": //bold
if (stream.eat("_")) {
return chain(inBlock("strong", "__", inText));
}
break;
case "'": //italics
if (stream.eat("'")) {
// Italic text
return chain(inBlock("em", "''", inText));
}
break;
case "(":// Wiki Link
if (stream.eat("(")) {
return chain(inBlock("variable-2", "))", inText));
}
break;
case "[":// Weblink
return chain(inBlock("variable-3", "]", inText));
break;
case "|": //table
if (stream.eat("|")) {
return chain(inBlock("comment", "||"));
}
break;
case "-":
if (stream.eat("=")) {//titleBar
return chain(inBlock("header string", "=-", inText));
} else if (stream.eat("-")) {//deleted
return chain(inBlock("error tw-deleted", "--", inText));
}
break;
case "=": //underline
if (stream.match("==")) {
return chain(inBlock("tw-underline", "===", inText));
}
break;
case ":":
if (stream.eat(":")) {
return chain(inBlock("comment", "::"));
}
break;
case "^": //box
return chain(inBlock("tw-box", "^"));
break;
case "~": //np
if (stream.match("np~")) {
return chain(inBlock("meta", "~/np~"));
}
break;
}
//start of line types
if (sol) {
switch (ch) {
case "!": //header at start of line
if (stream.match('!!!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!!')) {
return chain(inLine("header string"));
} else if (stream.match('!!')) {
return chain(inLine("header string"));
} else {
return chain(inLine("header string"));
}
break;
case "*": //unordered list line item, or <li /> at start of line
case "#": //ordered list line item, or <li /> at start of line
case "+": //ordered list line item, or <li /> at start of line
return chain(inLine("tw-listitem bracket"));
break;
}
}
//stream.eatWhile(/[&{]/); was eating up plugins, turned off to act less like html and more like tiki
return null;
}
var indentUnit = config.indentUnit;
// Return variables for tokenizers
var pluginName, type;
function inPlugin(stream, state) {
var ch = stream.next();
var peek = stream.peek();
if (ch == "}") {
state.tokenize = inText;
//type = ch == ")" ? "endPlugin" : "selfclosePlugin"; inPlugin
return "tag";
} else if (ch == "(" || ch == ")") {
return "bracket";
} else if (ch == "=") {
type = "equals";
if (peek == ">") {
ch = stream.next();
peek = stream.peek();
}
//here we detect values directly after equal character with no quotes
if (!/[\'\"]/.test(peek)) {
state.tokenize = inAttributeNoQuote();
}
//end detect values
return "operator";
} else if (/[\'\"]/.test(ch)) {
state.tokenize = inAttribute(ch);
return state.tokenize(stream, state);
} else {
stream.eatWhile(/[^\s\u00a0=\"\'\/?]/);
return "keyword";
}
}
function inAttribute(quote) {
return function(stream, state) {
while (!stream.eol()) {
if (stream.next() == quote) {
state.tokenize = inPlugin;
break;
}
}
return "string";
};
}
function inAttributeNoQuote() {
return function(stream, state) {
while (!stream.eol()) {
var ch = stream.next();
var peek = stream.peek();
if (ch == " " || ch == "," || /[ )}]/.test(peek)) {
state.tokenize = inPlugin;
break;
}
}
return "string";
};
}
var curState, setStyle;
function pass() {
for (var i = arguments.length - 1; i >= 0; i--) curState.cc.push(arguments[i]);
}
function cont() {
pass.apply(null, arguments);
return true;
}
function pushContext(pluginName, startOfLine) {
var noIndent = curState.context && curState.context.noIndent;
curState.context = {
prev: curState.context,
pluginName: pluginName,
indent: curState.indented,
startOfLine: startOfLine,
noIndent: noIndent
};
}
function popContext() {
if (curState.context) curState.context = curState.context.prev;
}
function element(type) {
if (type == "openPlugin") {curState.pluginName = pluginName; return cont(attributes, endplugin(curState.startOfLine));}
else if (type == "closePlugin") {
var err = false;
if (curState.context) {
err = curState.context.pluginName != pluginName;
popContext();
} else {
err = true;
}
if (err) setStyle = "error";
return cont(endcloseplugin(err));
}
else if (type == "string") {
if (!curState.context || curState.context.name != "!cdata") pushContext("!cdata");
if (curState.tokenize == inText) popContext();
return cont();
}
else return cont();
}
function endplugin(startOfLine) {
return function(type) {
if (
type == "selfclosePlugin" ||
type == "endPlugin"
)
return cont();
if (type == "endPlugin") {pushContext(curState.pluginName, startOfLine); return cont();}
return cont();
};
}
function endcloseplugin(err) {
return function(type) {
if (err) setStyle = "error";
if (type == "endPlugin") return cont();
return pass();
};
}
function attributes(type) {
if (type == "keyword") {setStyle = "attribute"; return cont(attributes);}
if (type == "equals") return cont(attvalue, attributes);
return pass();
}
function attvalue(type) {
if (type == "keyword") {setStyle = "string"; return cont();}
if (type == "string") return cont(attvaluemaybe);
return pass();
}
function attvaluemaybe(type) {
if (type == "string") return cont(attvaluemaybe);
else return pass();
}
return {
startState: function() {
return {tokenize: inText, cc: [], indented: 0, startOfLine: true, pluginName: null, context: null};
},
token: function(stream, state) {
if (stream.sol()) {
state.startOfLine = true;
state.indented = stream.indentation();
}
if (stream.eatSpace()) return null;
setStyle = type = pluginName = null;
var style = state.tokenize(stream, state);
if ((style || type) && style != "comment") {
curState = state;
while (true) {
var comb = state.cc.pop() || element;
if (comb(type || style)) break;
}
}
state.startOfLine = false;
return setStyle || style;
},
indent: function(state, textAfter) {
var context = state.context;
if (context && context.noIndent) return 0;
if (context && /^{\//.test(textAfter))
context = context.prev;
while (context && !context.startOfLine)
context = context.prev;
if (context) return context.indent + indentUnit;
else return 0;
},
electricChars: "/"
};
});
CodeMirror.defineMIME("text/tiki", "tiki");
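// Illustrative usage (hypothetical element id "wiki"): once this file is loaded,
// an editor can opt in via the registered MIME type, e.g.
//   CodeMirror.fromTextArea(document.getElementById("wiki"), {mode: "text/tiki"});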
}); | PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/modules/feincms/project/static/scripts/libs/tiny_mce/plugins/table/js/row.js | tinyMCEPopup.requireLangPack();
function init() {
tinyMCEPopup.resizeToInnerSize();
document.getElementById('backgroundimagebrowsercontainer').innerHTML = getBrowserHTML('backgroundimagebrowser','backgroundimage','image','table');
document.getElementById('bgcolor_pickcontainer').innerHTML = getColorPickerHTML('bgcolor_pick','bgcolor');
var inst = tinyMCEPopup.editor;
var dom = inst.dom;
var trElm = dom.getParent(inst.selection.getStart(), "tr");
var formObj = document.forms[0];
var st = dom.parseStyle(dom.getAttrib(trElm, "style"));
// Get table row data
var rowtype = trElm.parentNode.nodeName.toLowerCase();
var align = dom.getAttrib(trElm, 'align');
var valign = dom.getAttrib(trElm, 'valign');
var height = trimSize(getStyle(trElm, 'height', 'height'));
var className = dom.getAttrib(trElm, 'class');
var bgcolor = convertRGBToHex(getStyle(trElm, 'bgcolor', 'backgroundColor'));
var backgroundimage = getStyle(trElm, 'background', 'backgroundImage').replace(new RegExp("url\\(['\"]?([^'\"]*)['\"]?\\)", 'gi'), "$1");
var id = dom.getAttrib(trElm, 'id');
var lang = dom.getAttrib(trElm, 'lang');
var dir = dom.getAttrib(trElm, 'dir');
selectByValue(formObj, 'rowtype', rowtype);
// Any cells selected
if (dom.select('td.mceSelected,th.mceSelected', trElm).length == 0) {
// Setup form
addClassesToList('class', 'table_row_styles');
TinyMCE_EditableSelects.init();
formObj.bgcolor.value = bgcolor;
formObj.backgroundimage.value = backgroundimage;
formObj.height.value = height;
formObj.id.value = id;
formObj.lang.value = lang;
formObj.style.value = dom.serializeStyle(st);
selectByValue(formObj, 'align', align);
selectByValue(formObj, 'valign', valign);
selectByValue(formObj, 'class', className, true, true);
selectByValue(formObj, 'dir', dir);
// Resize some elements
if (isVisible('backgroundimagebrowser'))
document.getElementById('backgroundimage').style.width = '180px';
updateColor('bgcolor_pick', 'bgcolor');
} else
tinyMCEPopup.dom.hide('action');
}
function updateAction() {
var inst = tinyMCEPopup.editor, dom = inst.dom, trElm, tableElm, formObj = document.forms[0];
var action = getSelectValue(formObj, 'action');
if (!AutoValidator.validate(formObj)) {
tinyMCEPopup.alert(AutoValidator.getErrorMessages(formObj).join('. ') + '.');
return false;
}
tinyMCEPopup.restoreSelection();
trElm = dom.getParent(inst.selection.getStart(), "tr");
tableElm = dom.getParent(inst.selection.getStart(), "table");
// Update all selected rows
if (dom.select('td.mceSelected,th.mceSelected', trElm).length > 0) {
tinymce.each(tableElm.rows, function(tr) {
var i;
for (i = 0; i < tr.cells.length; i++) {
if (dom.hasClass(tr.cells[i], 'mceSelected')) {
updateRow(tr, true);
return;
}
}
});
inst.addVisual();
inst.nodeChanged();
inst.execCommand('mceEndUndoLevel');
tinyMCEPopup.close();
return;
}
switch (action) {
case "row":
updateRow(trElm);
break;
case "all":
var rows = tableElm.getElementsByTagName("tr");
for (var i=0; i<rows.length; i++)
updateRow(rows[i], true);
break;
case "odd":
case "even":
var rows = tableElm.getElementsByTagName("tr");
for (var i=0; i<rows.length; i++) {
if ((i % 2 == 0 && action == "odd") || (i % 2 != 0 && action == "even"))
updateRow(rows[i], true, true);
}
break;
}
inst.addVisual();
inst.nodeChanged();
inst.execCommand('mceEndUndoLevel');
tinyMCEPopup.close();
}
function updateRow(tr_elm, skip_id, skip_parent) {
var inst = tinyMCEPopup.editor;
var formObj = document.forms[0];
var dom = inst.dom;
var curRowType = tr_elm.parentNode.nodeName.toLowerCase();
var rowtype = getSelectValue(formObj, 'rowtype');
var doc = inst.getDoc();
// Update row element
if (!skip_id)
dom.setAttrib(tr_elm, 'id', formObj.id.value);
dom.setAttrib(tr_elm, 'align', getSelectValue(formObj, 'align'));
dom.setAttrib(tr_elm, 'vAlign', getSelectValue(formObj, 'valign'));
dom.setAttrib(tr_elm, 'lang', formObj.lang.value);
dom.setAttrib(tr_elm, 'dir', getSelectValue(formObj, 'dir'));
dom.setAttrib(tr_elm, 'style', dom.serializeStyle(dom.parseStyle(formObj.style.value)));
dom.setAttrib(tr_elm, 'class', getSelectValue(formObj, 'class'));
// Clear deprecated attributes
dom.setAttrib(tr_elm, 'background', '');
dom.setAttrib(tr_elm, 'bgColor', '');
dom.setAttrib(tr_elm, 'height', '');
// Set styles
tr_elm.style.height = getCSSSize(formObj.height.value);
tr_elm.style.backgroundColor = formObj.bgcolor.value;
if (formObj.backgroundimage.value != "")
tr_elm.style.backgroundImage = "url('" + formObj.backgroundimage.value + "')";
else
tr_elm.style.backgroundImage = '';
// Setup new rowtype
if (curRowType != rowtype && !skip_parent) {
// first, clone the node we are working on
var newRow = tr_elm.cloneNode(1);
// next, find the parent of its new destination (creating it if necessary)
var theTable = dom.getParent(tr_elm, "table");
var dest = rowtype;
var newParent = null;
for (var i = 0; i < theTable.childNodes.length; i++) {
if (theTable.childNodes[i].nodeName.toLowerCase() == dest)
newParent = theTable.childNodes[i];
}
if (newParent == null) {
newParent = doc.createElement(dest);
if (theTable.firstChild.nodeName == 'CAPTION')
inst.dom.insertAfter(newParent, theTable.firstChild);
else
theTable.insertBefore(newParent, theTable.firstChild);
}
// append the row to the new parent
newParent.appendChild(newRow);
// remove the original
tr_elm.parentNode.removeChild(tr_elm);
// set tr_elm to the new node
tr_elm = newRow;
}
dom.setAttrib(tr_elm, 'style', dom.serializeStyle(dom.parseStyle(tr_elm.style.cssText)));
}
function changedBackgroundImage() {
var formObj = document.forms[0], dom = tinyMCEPopup.editor.dom;
var st = dom.parseStyle(formObj.style.value);
st['background-image'] = "url('" + formObj.backgroundimage.value + "')";
formObj.style.value = dom.serializeStyle(st);
}
function changedStyle() {
var formObj = document.forms[0], dom = tinyMCEPopup.editor.dom;
var st = dom.parseStyle(formObj.style.value);
if (st['background-image'])
formObj.backgroundimage.value = st['background-image'].replace(new RegExp("url\\('?([^']*)'?\\)", 'gi'), "$1");
else
formObj.backgroundimage.value = '';
if (st['height'])
formObj.height.value = trimSize(st['height']);
if (st['background-color']) {
formObj.bgcolor.value = st['background-color'];
updateColor('bgcolor_pick','bgcolor');
}
}
function changedSize() {
var formObj = document.forms[0], dom = tinyMCEPopup.editor.dom;
var st = dom.parseStyle(formObj.style.value);
var height = formObj.height.value;
if (height != "")
st['height'] = getCSSSize(height);
else
st['height'] = "";
formObj.style.value = dom.serializeStyle(st);
}
function changedColor() {
var formObj = document.forms[0], dom = tinyMCEPopup.editor.dom;
var st = dom.parseStyle(formObj.style.value);
st['background-color'] = formObj.bgcolor.value;
formObj.style.value = dom.serializeStyle(st);
}
tinyMCEPopup.onInit.add(init); | PypiClean |
/Flask-Saved-1.0.6.tar.gz/Flask-Saved-1.0.6/flask_saved/__init__.py | from werkzeug.utils import import_string
from flask import current_app
_DRIVES = {
'local':'flask_saved.providers.local.LocalStorage',
'oss': 'flask_saved.providers.oss.OssStorage'
}
class Storage:
def __init__(self, app=None):
self.default_provider = None
if app is not None:
self.init_app(app)
@staticmethod
def provider(name=None):
_provider = name if name is not None else current_app.config['STORAGE_PROVIDER_DEFAULT']
if _provider not in _DRIVES:
raise RuntimeError('Storage Provider error')
_provider_object = import_string(_DRIVES[_provider])
return _provider_object()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
current_provider = current_app.config['STORAGE_PROVIDER_DEFAULT']
if current_provider not in _DRIVES:
raise RuntimeError('Storage Provider error')
_provider_object = import_string(_DRIVES[current_provider])
return getattr(_provider_object(), key)
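# Illustrative usage sketch (assumes a Flask app object named `app`):
#   storage = Storage()
#   app.config['STORAGE_PROVIDER_DEFAULT'] = 'local'
#   storage.init_app(app)   # init_app below documents the recognized config keys
# Attribute access on `storage` is then proxied to the active provider object
# via __getattr__ above.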
def init_app(self, app):
# Default STORAGE provider
default_provider = app.config.setdefault('STORAGE_PROVIDER_DEFAULT', 'local')
if default_provider not in _DRIVES:
raise RuntimeError('STORAGE_PROVIDER_DEFAULT set error')
# LOCAL provider configuration
app.config.setdefault('STORAGE_LOCAL_BASE_PATH', 'upload')
app.config.setdefault('STORAGE_LOCAL_BASE_URL', None)
# OSS provider configuration
oss_key = app.config.setdefault('STORAGE_OSS_ACCESS_KEY', None)
oss_secret = app.config.setdefault('STORAGE_OSS_SECRET_KEY', None)
oss_endpoint = app.config.setdefault('STORAGE_OSS_ENDPOINT', None)
oss_bucket = app.config.setdefault('STORAGE_OSS_BUCKET', None)
app.config.setdefault('STORAGE_OSS_CNAME', None)
app.config.setdefault('STORAGE_OSS_DOMIAN', None)
app.config.setdefault('STORAGE_OSS_BASE_PATH', None)
# Config options that must be set when using the oss provider
if default_provider == 'oss':
if oss_key is None:
raise RuntimeError('STORAGE_OSS_ACCESS_KEY must be set')
if oss_secret is None:
raise RuntimeError('STORAGE_OSS_SECRET_KEY must be set')
if oss_endpoint is None:
raise RuntimeError('STORAGE_OSS_ENDPOINT must be set')
if oss_bucket is None:
raise RuntimeError('STORAGE_OSS_BUCKET must be set')
self.default_provider = default_provider
app.extensions['storage'] = self | PypiClean |
/FreezeUI_U-0.0.5.tar.gz/FreezeUI_U-0.0.5/FreezeUI_U/msi_win.py | from PyQt6.QtGui import *
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from FreezeUI_U.editor_window import *
import os
assets_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets")
include_folder_list = []
include_file_list = []
scroll_bar_stylesheet = """QScrollBar:vertical {
border: 1px solid #aaa;
border-radius:2px;
background:gray;
width:13px;
margin: 0px 3px 0px 0px;
}
QScrollBar::handle:vertical {
background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
stop: 0 #3fe433, stop: 1 #3febe8);
min-height: 0px;
}
QScrollBar::add-line:vertical {
background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
stop: 0 rgb(32, 47, 130), stop: 0.5 rgb(32, 47, 130), stop:1 rgb(32, 47, 130));
height: 0px;
subcontrol-position: bottom;
subcontrol-origin: margin;
}
QScrollBar::sub-line:vertical {
background: qlineargradient(x1:0, y1:0, x2:1, y2:0,
stop: 0 rgb(32, 47, 130), stop: 0.5 rgb(32, 47, 130), stop:1 rgb(32, 47, 130));
height: 0 px;
subcontrol-position: top;
subcontrol-origin: margin;
}
"""
class UiMsiWindow(QMainWindow):
def __init__(self):
super().__init__()
self.resize(725, 600)
self.setStyleSheet("background : #03203C")
self.centralwidget = QWidget(self)
self.setCentralWidget(self.centralwidget)
self.setWindowTitle("FreezeUI")
self.setWindowIcon(QIcon(f"{assets_folder}/pyicon.svg"))
font = QFont()
font.setFamily("Yu Gothic UI")
font.setPointSize(14)
font.setBold(True)
font.setWeight(QFont.Weight.Bold)
# creating widgets
self.scrollArea = QScrollArea(self.centralwidget)
self.scrollArea.verticalScrollBar().setStyleSheet(scroll_bar_stylesheet)
self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.verticalLayout = QVBoxLayout(self.scrollAreaWidgetContents)
self.groupBox = QGroupBox(self.scrollAreaWidgetContents)
self.app_edit = QLineEdit(self.groupBox)
self.version_edit = QLineEdit(self.groupBox)
self.copyright_edit = QLineEdit(self.groupBox)
self.description_edit = QLineEdit(self.groupBox)
self.icon_edit = QLineEdit(self.groupBox)
self.icon_browser_button = QPushButton(self.groupBox)
self.python_script_edit = QLineEdit(self.groupBox)
self.python_script_browser_button = QPushButton(self.groupBox)
self.include_modules_list = QLineEdit(self.groupBox)
self.exclude_modules_list = QLineEdit(self.groupBox)
self.groupBox_2 = QGroupBox(self.scrollAreaWidgetContents)
self.upgrade_code_edit = QLineEdit(self.groupBox_2)
self.file_include_edit = QLineEdit(self.groupBox_2)
self.include_files_button = QPushButton(self.groupBox_2)
self.folder_include_edit = QLineEdit(self.groupBox_2)
self.include_folders_button = QPushButton(self.groupBox_2)
self.author_edit = QLineEdit(self.groupBox_2)
self.add_to_path = QCheckBox(self.groupBox_2)
self.add_to_path.setCheckable(True)
self.add_to_path.setChecked(False)
self.install_for_all = QCheckBox(self.groupBox_2)
self.console_app = QCheckBox(self.groupBox)
self.console_app.setChecked(True)
self.verticalLayout.addWidget(self.groupBox)
self.verticalLayout.addWidget(self.groupBox_2)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.frame = QFrame(self.centralwidget)
self.frame.setFrameShape(QFrame.Shape.StyledPanel)
self.frame.setFrameShadow(QFrame.Shadow.Raised)
self.freezeui_gif = QLabel(self.frame)
self.freezeui_gif.setEnabled(True)
gif_icon = QMovie(f'{assets_folder}/icon.gif')
gif_icon.setScaledSize(QSize(120, 120))
self.freezeui_gif.setMovie(gif_icon)
gif_icon.start()
self.so_easy_text = QLabel(self.frame)
self.so_easy_text.setPixmap(QPixmap(f"{assets_folder}/msi_text.png"))
self.so_easy_text.setScaledContents(True)
self.script_gen_button = QPushButton(self.centralwidget)
# setting text and placeholder text to widgets
self.groupBox.setTitle("App Settings")
self.groupBox_2.setTitle("MSI settings")
self.app_edit.setPlaceholderText("App Name")
self.version_edit.setPlaceholderText("Version")
self.copyright_edit.setPlaceholderText("Copyright")
self.description_edit.setPlaceholderText("Description")
self.icon_edit.setPlaceholderText("Icon")
self.python_script_edit.setPlaceholderText("Path to python script")
self.author_edit.setPlaceholderText("Author")
self.include_modules_list.setPlaceholderText("Modules to be included (separated by spaces)")
self.exclude_modules_list.setPlaceholderText("Modules to be excluded (separated by spaces)")
self.upgrade_code_edit.setPlaceholderText("Upgrade Code")
self.file_include_edit.setPlaceholderText("Files needed to be included(list)")
self.folder_include_edit.setPlaceholderText("Folders needed to be included(list)")
self.include_folders_button.setText("BROWSE")
self.add_to_path.setText("Add to path")
self.include_files_button.setText("BROWSE")
self.script_gen_button.setText("Generate Script")
self.icon_browser_button.setText("BROWSE")
self.python_script_browser_button.setText("BROWSE")
self.install_for_all.setText("Install for all users")
self.console_app.setText("Show console")
# setting stylesheet to widgets
self.script_gen_button.setStyleSheet(
"border-radius:5px;background:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #FD297A , stop: 1 #9424F0)")
self.file_include_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.upgrade_code_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.include_files_button.setStyleSheet(
"border-radius:5px;background:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #FD297A , stop: 1 #9424F0)")
self.folder_include_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.groupBox_2.setStyleSheet("color:white")
self.include_folders_button.setStyleSheet(
"border-radius:5px;background:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #FD297A , stop: 1 #9424F0)")
self.freezeui_gif.setStyleSheet("background:red")
self.groupBox.setStyleSheet("color:white")
self.exclude_modules_list.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.app_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.version_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.icon_browser_button.setStyleSheet(
"border-radius:5px;background:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #FD297A , stop: 1 #9424F0)")
self.copyright_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.description_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.icon_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.author_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.python_script_edit.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
self.python_script_browser_button.setStyleSheet(
"border-radius:5px;background:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #FD297A , stop: 1 #9424F0)")
self.include_modules_list.setStyleSheet(
"background-color:white;border-radius: 4px;border-color:black;color:black")
# setting geometry to widgets
self.scrollArea.setGeometry(QRect(20, 150, 690, 340))
self.scrollArea.setMinimumSize(QSize(0, 390))
self.scrollArea.setSizeIncrement(QSize(0, 300))
self.scrollAreaWidgetContents.setGeometry(QRect(0, -396, 672, 734))
self.groupBox.setMinimumSize(QSize(0, 450))
self.app_edit.setGeometry(QRect(10, 20, 500, 30))
self.app_edit.setMinimumSize(QSize(500, 30))
self.version_edit.setGeometry(QRect(10, 70, 500, 30))
self.version_edit.setMinimumSize(QSize(500, 30))
self.copyright_edit.setGeometry(QRect(10, 120, 500, 30))
self.copyright_edit.setMinimumSize(QSize(500, 30))
self.description_edit.setGeometry(QRect(10, 170, 500, 30))
self.icon_edit.setGeometry(QRect(10, 220, 500, 30))
self.icon_browser_button.setGeometry(QRect(530, 220, 110, 30))
self.icon_browser_button.setMinimumSize(QSize(0, 30))
self.python_script_edit.setGeometry(QRect(10, 270, 500, 30))
self.python_script_browser_button.setGeometry(QRect(530, 270, 110, 30))
self.python_script_browser_button.setMinimumSize(QSize(0, 30))
self.include_modules_list.setGeometry(QRect(10, 320, 630, 30))
self.exclude_modules_list.setGeometry(QRect(10, 370, 630, 30))
self.groupBox_2.setMinimumSize(QSize(0, 300))
self.upgrade_code_edit.setGeometry(QRect(10, 20, 630, 30))
self.file_include_edit.setGeometry(QRect(10, 70, 500, 30))
self.include_files_button.setGeometry(QRect(530, 70, 110, 30))
self.include_files_button.setMinimumSize(QSize(0, 30))
self.folder_include_edit.setGeometry(QRect(10, 120, 500, 30))
self.include_folders_button.setGeometry(QRect(530, 120, 110, 30))
self.include_folders_button.setMinimumSize(QSize(0, 30))
self.author_edit.setGeometry(QRect(10, 170, 500, 30))
self.add_to_path.setGeometry(QRect(10, 210, 140, 30))
self.install_for_all.setGeometry(QRect(10, 250, 170, 30))
self.frame.setGeometry(QRect(22, 9, 580, 130))
self.frame.setMinimumSize(QSize(0, 130))
self.freezeui_gif.setGeometry(QRect(-1, 0, 120, 120))
self.so_easy_text.setGeometry(QRect(200, 20, 370, 80))
self.script_gen_button.setGeometry(QRect(200, 550, 350, 30))
self.script_gen_button.setMinimumSize(QSize(0, 30))
self.console_app.move(10,410)
# setting font to widgets
self.app_edit.setFont(font)
self.version_edit.setFont(font)
self.copyright_edit.setFont(font)
self.description_edit.setFont(font)
self.icon_edit.setFont(font)
self.icon_browser_button.setFont(font)
self.python_script_edit.setFont(font)
self.python_script_browser_button.setFont(font)
self.include_modules_list.setFont(font)
self.exclude_modules_list.setFont(font)
self.upgrade_code_edit.setFont(font)
self.file_include_edit.setFont(font)
self.include_files_button.setFont(font)
self.folder_include_edit.setFont(font)
self.include_folders_button.setFont(font)
self.author_edit.setFont(font)
font.setPointSize(12)
self.add_to_path.setFont(font)
self.install_for_all.setFont(font)
self.console_app.setFont(font)
self.script_gen_button.setFont(font)
# triggering actions
self.icon_browser_button.clicked.connect(self.exe_icon_function)
self.python_script_browser_button.clicked.connect(self.python_script_browser)
self.include_folders_button.clicked.connect(self.include_folders)
self.include_files_button.clicked.connect(self.include_files)
self.script_gen_button.clicked.connect(self.generate_script)
self.show()
def exe_icon_function(self):
icon_file_dialogue, _ = QFileDialog.getOpenFileName(
self, "Select Icon", filter="Icons(*.ico)")
if icon_file_dialogue:
self.icon_edit.setText(icon_file_dialogue)
def python_script_browser(self):
python_file_dialogue, _ = QFileDialog.getOpenFileName(
self, "Select Python Script", filter="Python file (*.py)")
if python_file_dialogue:
self.python_script_edit.setText(python_file_dialogue)
def include_folders(self):
select_folder = QFileDialog.getExistingDirectory(self, "Select folder")
if select_folder:
include_folder_list.append(select_folder)
self.folder_include_edit.setText(f"{include_folder_list}")
def include_files(self):
select_files, _ = QFileDialog.getOpenFileNames(
self, "Select Files", filter="All files (*.*)")
if select_files:
for i in select_files:
include_file_list.append(i)
self.file_include_edit.setText(f"{include_file_list}")
def generate_script(self):
app_name = self.app_edit.text().strip()
msi_author = self.author_edit.text().strip()
version = str(None) if self.version_edit.text() == "" else f'"{self.version_edit.text().strip()}"'
copyright = self.copyright_edit.text().strip()
description = self.description_edit.text().strip()
exe_icon = str(None) if self.icon_edit.text() == "" else f'"{self.icon_edit.text().strip()}"'
python_file = self.python_script_edit.text().strip()
package_list = str([]) if self.include_modules_list.text() == "" else str(self.include_modules_list.text().strip().split(' '))
exclude_modules_list = str([]) if self.exclude_modules_list.text() == "" else str(self.exclude_modules_list.text().strip().split(' '))
upgrade_code = str(None) if self.upgrade_code_edit.text() == "" else self.upgrade_code_edit.text().strip()
include_folders = self.folder_include_edit.text().strip()
include_files = self.file_include_edit.text().strip()
add_to_path = str(True) if self.add_to_path.isChecked() else str(False)
install_for_all = str(True) if self.install_for_all.isChecked() else str(False)
includes = str([])
base = str(None) if self.console_app.isChecked() else '"Win32GUI"'
guid = "None" if upgrade_code == "None" else "{%s}" % upgrade_code
if include_files != "" and include_folders != "":
includes = include_files.strip("]")+","+include_folders.strip("[")
if include_files == "" and include_folders != "":
includes = include_folders
if include_files != "" and include_folders == "":
includes = include_files
if python_file == "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Please select python file (.py) ")
msg.exec()
return
if not os.path.exists(python_file):
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Python file does not exists")
msg.exec()
return
script = open(f"{assets_folder}/msi_template.txt").read()
converted_script = (
script.replace("description-", description)
.replace("version-", version)
.replace("copyright-", copyright)
.replace("exe_icon-", exe_icon)
.replace("upgrade_code-", guid)
.replace("add_to_path-", add_to_path)
.replace("app_name-", app_name)
.replace("exclude_module-", exclude_modules_list)
.replace("include_module-", package_list)
.replace("python_file-", python_file)
.replace("include_files-", includes)
.replace("author-", msi_author)
.replace("all_users-", install_for_all)
.replace("base-",base)
)
self.show_editor(converted_script, python_file)
def show_editor(self, script, python_file):
self.editor_window = EditorWindow(script, python_file, "MSI")
self.editor_window.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = UiMsiWindow()
sys.exit(app.exec()) | PypiClean |
/Anilius-2.0.3rc1.tar.gz/Anilius-2.0.3rc1/src/anilius/core/serializer.py | from collections import OrderedDict
from anilius.core.serializer_field import SerializerField
from google.protobuf.message import Message
from google.protobuf.internal.containers import RepeatedScalarFieldContainer
class SerializerMetaclass(type):
"""
This metaclass sets a dictionary named `_declared_fields` on the class.
Any instances of `Field` included as attributes on either the class
or on any of its superclasses will be include in the
`_declared_fields` dictionary.
"""
@classmethod
def _get_declared_fields(mcs, bases, attrs):
fields = [
(field_name, attrs.pop(field_name))
for field_name, obj in list(attrs.items())
if isinstance(obj, SerializerField)
]
fields.sort(key=lambda x: x[1].get_creation_counter())
# Ensures a base class field doesn't override cls attrs, and maintains
# field precedence when inheriting multiple parents. e.g. if there is a
# class C(A, B), and A and B both define 'field', use 'field' from A.
known = set(attrs)
def visit(name):
known.add(name)
return name
base_fields = [
(visit(name), f)
for base in bases
if hasattr(base, "_declared_fields")
for name, f in getattr(base, "_declared_fields").items()
if name not in known
]
return OrderedDict(base_fields + fields)
def __new__(mcs, name, bases, attrs):
attrs["_declared_fields"] = mcs._get_declared_fields(bases, attrs)
return super().__new__(mcs, name, bases, attrs)
class Serializer(SerializerField, metaclass=SerializerMetaclass):
_declared_fields = None
def __init__(self, request):
super().__init__()
assert isinstance(request, Message), "Request should be type of Message"
for field in request.ListFields():
if field[0].name in self._declared_fields:
raw_value = getattr(request, field[0].name)
raw_value = self.extract_message(raw_value)
self._declared_fields[field[0].name].set_raw_value(raw_value)
def extract_message(self, raw_value):
        if isinstance(raw_value, Message):
            raw_dict = {}
            for field in raw_value.ListFields():
                # Extract each sub-field without clobbering the message that is
                # being iterated over
                field_value = getattr(raw_value, field[0].name)
                raw_dict[field[0].name] = self.extract_message(field_value)
            raw_value = raw_dict
elif type(raw_value) is RepeatedScalarFieldContainer:
raw_list = []
for element in raw_value:
raw_list.append(self.extract_message(element))
raw_value = raw_list
print(type(raw_value))
return raw_value
def validate(self):
return True
def get_value(self):
return self.to_dict()
def get_declared_fields(self):
return self._declared_fields
def to_dict(self):
return dict(self.get_declared_fields()) | PypiClean |
/Ax_Metrics-0.9.2.tar.gz/Ax_Metrics-0.9.2/py/axonchisel/metrics/io/emfetch/base.py |
import collections
from axonchisel.metrics.foundation.ax.obj import AxObj
from axonchisel.metrics.foundation.ax.plugin import AxPluginBase
from axonchisel.metrics.foundation.chrono.timerange import TimeRange
from axonchisel.metrics.foundation.metricdef.metricdef import MetricDef
from axonchisel.metrics.foundation.data.point import DataPoint
from .tmrange_time_t import TimeRange_time_t
from .interface import EMFetcher
# ----------------------------------------------------------------------------
class EMFetcherBase(EMFetcher, AxPluginBase):
"""
EMFetch (Extensible Metrics Fetch) Plugin Superclass Base.
See EMFetcher interface class for detailed docs.
"""
def __init__(self, mdef, extinfo=None):
"""
Initialize around specific MetricDef and optional extinfo dict.
"""
# Default state:
self._mdef = None # MetricDef from config
self._tmrange = None # TimeRange transient storage per fetch
# Superclass init:
AxPluginBase.__init__(self)
# Validate, store MetricDef in self._mdef:
self._assert_type("mdef", mdef, MetricDef)
mdef.validate() # (raises TypeError, ValueError)
self._mdef = mdef
# Pass options to superclass:
self.configure(options = self.mdef.emfetch_opts, extinfo = extinfo)
#
# Public Methods
#
def fetch(self, tmrange):
"""
Invoked by MQEngine to fetch an individual data point.
May be called multiple times to load multiple data points.
Validates input, calls plugin_fetch(), validates, returns DataPoint.
"""
# Validate and cache input:
self._assert_type("tmrange", tmrange, TimeRange)
tmrange.validate()
self._tmrange = TimeRange_time_t(tmrange)
# Defer to plugin abstract method to fetch:
dpoint = self.plugin_fetch(tmrange)
# Validate result DataPoint:
self._assert_type("result", dpoint, DataPoint)
return dpoint
#
# Public Properties
#
@property
def mdef(self):
"""MetricDef we operate on (get only)."""
return self._mdef
#
# Protected Methods for Subclasses
#
def _format_str(self, fmt, what='?', od_defaults = Exception):
"""
Override from AxPluginBase -
Format a string using options, extinfo, and extra context (if any).
Protected wrapper for Python str.format.
"""
context = dict()
context['mdef'] = self._mdef
context['tmrange'] = self._tmrange
fmt = TimeRange_time_t.patch_format_str(fmt, ('tmrange',))
return AxPluginBase._format_str(self, fmt,
context=context, what=what, od_defaults=od_defaults)
#
# Internal Methods
#
def __unicode__(self):
return (u"{cls}({self.mdef})"
).format(self=self, cls=self.__class__.__name__,
)
# ---------------------------------------------------------------------------- | PypiClean |
/Gitbigcommits-1.0.3-py3-none-any.whl/gitbigcommits/report_layer/git_html_report.py | from Cheetah.Template import Template
from datetime import datetime
from gitbigcommits.core.git_commit_utility import GitFatCheckutility
import os
import sys
import pkg_resources
import logging
logging.basicConfig()
def get_output_list(git_folder=None, threshold_size=None):
'''
:param git_folder: Location of git repo folder.
:param threshold_size: size in KB
:return: list of dictionary with all info
    This will generate a list of the big commits in your
    repo. The inputs to the method are your git directory
    and the threshold file size.
    They can also be set in the environment variables "GIT_FOLDER_DIR" and
    "GIT_FILE_SIZE"
eg:
export GIT_FOLDER_DIR="usr/home/myrepo/"
export GIT_FILE_SIZE="1024" size is in Kb
'''
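    # Illustrative usage (hypothetical path and size):
    #   big_files = get_output_list("/usr/home/myrepo/", "1024")
    #   for info in big_files:
    #       print(info)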
if not git_folder:
if (len(sys.argv)) != 3:
print ('''Format should be:: bigcommits-html "<GitFolderPath>"
<ThresholdSize> \n GitFolderPath denotes where ".git" folder is
present''')
return
args = sys.argv[1:]
git_folder = args[0]
threshold_size = args[1]
git_utility = GitFatCheckutility(git_folder, threshold_size)
list_of_large_files = git_utility.get_all_file_with_info()
return list_of_large_files
def console_output():
'''
:return: None
The output will be printed in the std output
This will be called when the command-line command `cygitcheck` is invoked.
use: cygitcheck <GitFolderPath> <ThresholdSize>
GitFolderPath denotes where ".git" folder is
present
Eg: cygitcheck '/usr/home/viswesh/myrepo' 1024
'''
if (len(sys.argv)) != 3:
print ('''Format should be:: bigcommits "<GitFolderPath>"
<ThresholdSize> \n GitFolderPath denotes where ".git" folder is
present''')
return
args = sys.argv[1:]
folder_to_check = args[0]
size = args[1]
list_of_large_files = get_output_list(folder_to_check, size)
for row in list_of_large_files:
for key, val in row.items():
print (key, ":", val)
print ("\n")
def fat_html_output():
"""
This writes the output into a html file 'git_fat_files.html'.
"""
list_of_large_files = get_output_list()
template_name = os.getenv("FAT_TEMPLATE_NAME", "fat_file_user_info.html")
template_path = pkg_resources.resource_filename("gitbigcommits.miscellaneous",
template_name)
template = Template(file=template_path,
searchList=[{
'TODAY_DATE': datetime.now().strftime('%Y-%m-%d'),
'LIST_OF_BIGFILES': list_of_large_files
}
])
with open('git_fat_files.html', 'w') as html_file:
html_file.write(str(template))
print("Successfully generated html, file name is : %s" % template_name)
def dorm_branch_html_output():
'''
    This is to find the dormant branches present in your repo.
    Inputs to this method are two command-line arguments:
    1) the git folder path
    2) the dormant time in seconds (default is 4320000, i.e. 50 days);
       set it to 0 to report info for all branches
    :return: None
'''
if (len(sys.argv)) != 3:
        print ('''Format should be:: dbranch-html "<GitFolderPath>"
        <DormantTimeInSeconds> \n GitFolderPath denotes where ".git" folder is
        present''')
return
args = sys.argv[1:]
git_folder = args[0]
dormant_time = args[1]
git_utility = GitFatCheckutility(git_folder, "1")
dormant_time = float(dormant_time if dormant_time else "4320000")
branch_list_info = git_utility.domant_branch_info(dormant_time)
template_name = os.getenv("DORM_BRANCH_TEMPLATE_NAME",
"dorm_branch_info.html")
template_path = pkg_resources.resource_filename(
"gitbigcommits.miscellaneous",
template_name)
template = Template(file=template_path,
searchList=[{
'TODAY_DATE': datetime.now().strftime('%Y-%m-%d'),
'LIST_OF_BRANCHINFO': branch_list_info
}
])
with open('git_dorm_branch.html', 'w') as html_file:
html_file.write(str(template))
print("Successfully generated html, file name is : %s" % template_name) | PypiClean |
/MetaSBT-0.1.2.tar.gz/MetaSBT-0.1.2/metasbt/modules/utils.py | __author__ = "Fabio Cumbo ([email protected])"
__version__ = "0.1.0"
__date__ = "Apr 28, 2023"
import argparse as ap
import errno
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from ast import literal_eval
from collections.abc import Callable
from logging import Logger
from logging.config import dictConfig
from pathlib import Path
from typing import Any, Dict, List, Optional, TextIO, Tuple, Union
import numpy as np # type: ignore
# Define the list of dependencies
# This is never used but helps to keep track of the external
# software dependencies required by the functions implemented here
DEPENDENCIES = [
"checkm",
"howdesbt",
"kitsune",
"ntcard",
"wget",
]
# Define the list of supported extensions for compressed files
# .rrr compression is used by HowDeSBT only
# Everything else can be Gzip compressed only
COMPRESSED_FILES = [
".gz",
".rrr",
]
# Define the list of supported extensions for uncompressed files
UNCOMPRESSED_FILES = [
".fa",
".fna",
".fasta",
".bf",
".txt",
".tsv",
]
def bfaction(
genomes: List[str],
tmpdir: str,
kmer_len: int,
min_occurrences: int = 2,
filter_size: Optional[int] = None,
nproc: int = 1,
action: str = "bfdistance",
mode: str = "theta",
) -> Union[Dict[str, Dict[str, float]], Dict[str, int]]:
"""
bfdistance and bfoperate wrapper
:param genomes: List with paths to the genome or bloom filter files
:param tmpdir: Path to the temporary folder with bloom filters
:param kmer_len: Kmer length
:param min_occurrences: Exclude kmers with a number of occurrences less than this param
:param filter_size: Bloom filter size
:param nproc: Make it parallel
:param action: "bfoperate" or "bfdistance"
:param mode: bfoperate modes: "and", "or", "xor", "eq", and "not"
bfdistance modes: "hamming", "intersect", "union", and "theta"
:return: Dictionary with the result of bfdistance or bfoperate
"""
# Define supported actions
actions = ["bfoperate", "bfdistance"]
if action not in actions:
raise Exception('Unsupported action "{}"!'.format(action))
mode = mode.lower()
# Define supported modes
bfoperate_modes = ["and", "or", "xor", "eq", "not"]
bfdistance_modes = ["hamming", "intersect", "union", "theta"]
if (action == "bfoperate" and mode not in bfoperate_modes) or (
action == "bfdistance" and mode not in bfdistance_modes
):
raise Exception('Unsupported mode "{}" for action "{}"!'.format(mode, action))
# Check whether the input genomes exist
for filepath in genomes:
if not os.path.isfile(filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filepath)
# Keep going in case of 2 or more input genomes
if len(genomes) < 2:
raise Exception("The number of input genomes must be >2!")
# Check whether the temporary folder exists, otherwise create it
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir, exist_ok=True)
if not filter_size:
# Estimate the bloom filter size with ntCard
filter_size = estimate_bf_size(
genomes,
kmer_len=kmer_len,
min_occurrences=min_occurrences,
prefix="genomes",
tmp_dir=tmpdir,
nproc=nproc
)
# Take track of all the bloom filter file paths
bf_files = list()
howdesbt_log_filepath = os.path.join(tmpdir, "howdesbt.log")
howdesbt_log = open(howdesbt_log_filepath, "w+")
# Build the bloom filters
for genome_path in genomes:
# Retrieve genome file info
_, genome_name, extension, compression = get_file_info(genome_path)
# Define the uncompressed genome path
genome_file = os.path.join(tmpdir, "{}{}".format(genome_name, extension))
if not os.path.exists(genome_file):
if not compression:
# Make a symbolic link in case of an uncompressed file
os.symlink(genome_path, genome_file)
else:
# Uncompress the genome file
with open(genome_file, "w+") as file:
run(["gzip", "-dc", genome_path], stdout=file, stderr=file)
# Define the bloom filter file path
bf_filepath = os.path.join(tmpdir, "{}.bf".format(genome_name))
if not os.path.exists(bf_filepath):
# Build the bloom filter representation of the genome
run(
[
"howdesbt",
"makebf",
"--k={}".format(kmer_len),
"--min={}".format(min_occurrences),
"--bits={}".format(filter_size),
"--hashes=1",
"--seed=0,0",
genome_file,
"--out={}".format(bf_filepath),
"--threads={}".format(nproc),
],
stdout=howdesbt_log,
stderr=howdesbt_log,
)
if os.path.isfile(bf_filepath):
bf_files.append(bf_filepath)
dist = dict()
with tempfile.NamedTemporaryFile() as bflist, tempfile.NamedTemporaryFile() as bfaction_out:
# Dump the list of bloom filter file paths
with open(bflist.name, "wt") as bflist_file:
for filepath in bf_files:
bflist_file.write("{}\n".format(filepath))
with open(bfaction_out.name, "wt") as bfaction_out_file:
if action == "bfdistance":
run(
[
"howdesbt",
"bfdistance",
"--list={}".format(bflist.name),
"--show:{}".format(mode),
],
stdout=bfaction_out_file,
stderr=howdesbt_log,
)
# Retrieve the output of howdesbt bfdistance
with open(bfaction_out.name) as bfaction_out_file:
for line in bfaction_out_file:
line = line.strip()
if line:
                        # This is required to replace consecutive space instances with a single space
line_split = " ".join(line.split()).split(" ")
# Get genome names
_, genome1, _, _ = get_file_info(line_split[0].split(":")[0], check_exists=False)
_, genome2, _, _ = get_file_info(line_split[1].split(":")[0], check_exists=False)
# Remove non informative fields
if line_split[-1] == "({})".format("intersection" if mode == "intersect" else mode):
line_split = " ".join(line_split[:-1]).strip().split(" ")
if genome1 not in dist:
dist[genome1] = dict()
# Get distance
dist[genome1][genome2] = float(line_split[-1])
elif action == "bfoperate":
run(
[
"howdesbt",
"bfoperate",
"--list={}".format(bflist.name),
"--noout",
"--{}".format(mode),
"--report:counts",
],
stdout=bfaction_out_file,
stderr=howdesbt_log,
)
# Retrieve the output of howdesbt bfoperate
with open(bfaction_out.name) as bfaction_out_file:
for line in bfaction_out_file:
line = line.strip()
if line:
line_split = line.split(" ")
key = "result"
if line_split[0] != key:
# Get genome name
_, key, _, _ = get_file_info(line_split[0], check_exists=False)
# Get active bits
dist[key] = int(line_split[-3])
# Close the log
howdesbt_log.close()
return dist
def build_sh(argv: List[str], module: str, outfolder: str) -> None:
"""
Build a sh script with the command line used to launch a module
:param argv: List of arguments
:param module: Module ID
:param outfolder: Output folder path
"""
with open(os.path.join(outfolder, "{}.sh".format(module)), "w+") as sh:
sh.write("#!/bin/bash\n\n")
# Add metasbt
argv.insert(0, "metasbt")
# Replace the path to the python script with the module ID
argv[1] = module
# Finally build the command line
sh.write("{}\n".format(" ".join([os.path.abspath(v) if os.path.exists(v) else v for v in argv])))
def checkm(
genomes_paths: List[str],
tmp_dir: str,
file_extension: str = "fna.gz",
nproc: int = 1,
pplacer_threads: int = 1,
) -> List[str]:
"""
Run CheckM on a set of genomes
Organise genomes in chunks with 1000 genomes at most
:param genomes_paths: List of paths to the input genomes
:param tmp_dir: Path to the temporary folder
:param file_extension: Assume all genomes have the same file extension
:param nproc: Make the execution CheckM parallel
:param pplacer_threads: Maximum number of threads for pplacer
:return: Return the list of paths to the CheckM output tables
"""
# Define the output list of paths to the CheckM tables
output_tables = list()
# Check whether there is at least one genome path in list
if genomes_paths:
run_tmp_dir = os.path.join(tmp_dir, "tmp")
# Organise genomes
counter = 0
run_id = 1
os.makedirs(os.path.join(run_tmp_dir, "bins_{}".format(run_id)), exist_ok=True)
        # Iterate over the list of paths to the genome files
        for genome_path in genomes_paths:
            # Reorganise genomes in chunks with 1000 genomes at most
            if counter > 0 and counter % 1000 == 0:
                run_id += 1
                os.makedirs(os.path.join(run_tmp_dir, "bins_{}".format(run_id)), exist_ok=True)
            # Symlink genome files to the bins folder of the current chunk
            os.symlink(
                genome_path,
                os.path.join(run_tmp_dir, "bins_{}".format(run_id), os.path.basename(genome_path)),
            )
            # Count the genomes assigned to the current chunk
            counter += 1
# Iterate over the genomes chunk folders
for bins_folder in Path(run_tmp_dir).glob("bins_*"):
if os.path.isdir(str(bins_folder)):
# Retrieve the run ID from the file path
run_id = int(os.path.splitext(os.path.basename(str(bins_folder)))[0].split("_")[-1])
# Create the run folder
run_dir = os.path.join(tmp_dir, "run_{}".format(run_id))
os.makedirs(run_dir, exist_ok=True)
# Define the output table path for the current run
table_path = os.path.join(tmp_dir, "run_{}.tsv".format(run_id))
try:
# Run CheckM
# TODO update to CheckM2
run(
[
"checkm",
"lineage_wf",
"-t",
nproc,
"-x",
file_extension,
"--pplacer_threads",
pplacer_threads,
"--tab_table",
"-f",
table_path,
bins_folder,
run_dir,
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Add the output table path to the output list
output_tables.append(table_path)
except Exception:
pass
return output_tables
def cluster(
genomes_list: List[str],
boundaries: Dict[str, Dict[str, Union[int, float]]],
manifest_filepath: str,
profiles_dir: str,
tmpdir: str,
outpath: str,
cluster_prefix: str = "MSBT",
min_occurrences: int = 2,
nproc: int = 1,
) -> Tuple[Dict[str, Dict[str, str]], List[str]]:
"""
Define new clusters with the unassigned MAGs
:param genomes_list: List with paths to the unassigned genomes
:param boundaries: Boundaries table produced by the boundaries module
:param manifest_filepath: Path to the manifest file
:param profiles_dir: Path to the temporary folder with the genomes profiles defined by the profile module
:param tmpdir: Path to the temporary folder for building bloom filters
:param outpath: Path to the output file with the new assignments
:param cluster_prefix: Prefix of clusters numerical identifiers
:param min_occurrences: Exclude kmers with a number of occurrences less than this param
:param nproc: Make bfdistance parallel
:return: Return the assignments as a dictionary <genome_path, taxonomy>
Also return the list of paths to the unassigned genomes
"""
# Check whether the output file already exists
if os.path.isfile(outpath):
raise FileExistsError(errno.ENOENT, os.strerror(errno.ENOENT), outpath)
# Also check whether the input files already exist
# Otherwise, raise an exception
if not os.path.isfile(manifest_filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), manifest_filepath)
if not os.path.isdir(profiles_dir):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), profiles_dir)
# Retrieve the kmer length, filter size, and clusters counter from the manifest file
manifest = load_manifest(manifest_filepath)
kmer_len = manifest["kmer_len"]
clusters_counter_manifest = manifest["clusters_counter"]
# Estimate the proper bloom filter size for the set of unassigned genomes
filter_size = estimate_bf_size(
genomes_list,
kmer_len=kmer_len,
min_occurrences=min_occurrences,
prefix="genomes",
tmp_dir=tmpdir,
nproc=nproc
)
# Retrieve the list of input genomes
genomes = [get_file_info(genome_path)[1] for genome_path in genomes_list]
# Start counting new clusters
clusters_counter = clusters_counter_manifest
# Define the list of taxonomic levels for sorting profiles
levels = ["kingdom", "phylum", "class", "order", "family", "genus", "species"]
level_ids = [lv[0] for lv in levels]
# Keep track of the already assigned genomes
assigned_taxa: Dict[str, List[str]] = dict()
assigned_genomes: List[str] = list()
# Keep track of those genomes that MetaSBT is not able to assign
unassigned: List[str] = list()
# Compute pair-wise distance between genomes as the number of common kmers
# This could take a while
bfdistance_intersect = bfaction(
genomes_list, tmpdir, kmer_len, filter_size=filter_size, nproc=nproc, action="bfdistance", mode="intersect"
)
# Iterate over genomes
for i in range(len(genomes_list)):
if genomes[i] not in assigned_genomes:
# Retrieve the genome profile
profile = os.path.join(profiles_dir, "{}__profiles.tsv".format(genomes[i]))
# Check whether the profile exists
if os.path.isfile(profile):
# Load levels and scores
level2match = dict()
with open(profile) as file:
for line in file:
line = line.strip()
if line:
if not line.startswith("#"):
line_split = line.split("\t")
if line_split[1] in levels:
# Key: taxonomic level
# Value: kmers in common with the taxonomic level
level2match[line_split[1]] = {
"taxonomy": line_split[2],
"common_kmers": int(line_split[3].split("/")[0])
}
if line_split[1] == "genome":
# Override the species level with strains info
level2match["species"] = {
"taxonomy": "|".join(line_split[2].split("|")[:-1]),
"common_kmers": int(line_split[3].split("/")[0])
}
assigned_taxonomy = None
last_known_level_mink = 0
# From the species up to the kingdom level
for level in reversed(levels):
# Get level boundaries
mink, _ = get_level_boundaries(boundaries, level2match[level]["taxonomy"])
if level2match[level]["common_kmers"] >= mink and mink > 0:
assigned_taxonomy = level2match[level]["taxonomy"]
last_known_level_mink = mink
break
if not assigned_taxonomy:
# Unable to assign a taxonomic label to the current genome
unassigned.append(genomes_list[i])
else:
assignment = assigned_taxonomy.split("|")
# Fill the assignment with missing levels
assigned_levels = len(assignment)
for pos in range(assigned_levels, len(levels)):
clusters_counter += 1
# Create new clusters
assignment.append("{}__{}{}".format(level_ids[pos], cluster_prefix, clusters_counter))
# Compose the assigned (partial) label
assigned_taxonomy = "|".join(assignment)
                    # Assign the current genome to the taxonomy
if assigned_taxonomy not in assigned_taxa:
assigned_taxa[assigned_taxonomy] = list()
assigned_taxa[assigned_taxonomy].append(genomes_list[i])
# Mark current genome as assigned
assigned_genomes.append(genomes[i])
# Check whether other input genomes look pretty close to the current genome by computing
# the number of kmers in common between the current genome and all the other input genomes
for j in range(i + 1, len(genomes_list)):
# Kmers in common have been already computed
# It returns a float by default
common = int(bfdistance_intersect[genomes[i]][genomes[j]])
if common >= last_known_level_mink:
# Set the second genome as assigned
assigned_genomes.append(genomes[j])
# Also assign these genomes to the same taxonomy assigned to the current genome
assigned_taxa[assigned_taxonomy].append(genomes_list[j])
# Update the manifest with the new clusters counter
if clusters_counter > clusters_counter_manifest:
# Load the manifest file
with open(manifest_filepath) as manifest_file:
manifest_lines = manifest_file.readlines()
# Update the --clusters-counter info
with open(manifest_filepath, "w+") as manifest_file:
for line in manifest_lines:
line = line.strip()
if line:
line_split = line.split(" ")
if line_split[0] == "--clusters-counter":
line_split[-1] = str(clusters_counter)
manifest_file.write("{}\n".format(" ".join(line_split)))
# Mapping genome -> taxonomy, cluster
assignment = dict()
    # Dump the new assignments to the output file
with open(outpath, "w+") as out:
# Add header line
out.write("# Genome\tAssignment\tCluster ID\n")
for taxonomy in sorted(assigned_taxa.keys()):
for genome_path in sorted(assigned_taxa[taxonomy]):
# Get genome name
_, genome, _, _ = get_file_info(genome_path)
cluster_id = taxonomy.split("|")[-1][3:]
out.write("{}\t{}\t{}\n".format(genome, taxonomy, cluster_id))
# Take track of mapping genome - taxonomy
assignment[genome_path] = {
"taxonomy": taxonomy,
"cluster": cluster_id
}
return assignment, unassigned
def dereplicate_genomes(
genomes: list,
tax_id: str,
tmp_dir: str,
kmer_len: int,
filter_size: Optional[int] = None,
nproc: int = 1,
similarity: float = 1.0,
) -> List[str]:
"""
Dereplicate genomes
:param genomes: List of genome file paths
:param tax_id: NCBI tax ID
:param tmp_dir: Path to the temporary folder
:param kmer_len: Length of the kmers
:param filter_size: Size of the bloom filters
:param nproc: Make it parallel
:param similarity: Similarity threshold on the theta distance
Theta between two genomes A and B is defined as N/D, where
N is the number of 1s in common between A and B, and
D is the number of 1s in A
:return: List of genome file paths for genomes that passed the dereplication
"""
# Define the HowDeSBT temporary folder
howdesbt_tmp_dir = os.path.join(tmp_dir, "howdesbt", tax_id)
os.makedirs(howdesbt_tmp_dir, exist_ok=True)
filtered_genomes_filepath = os.path.join(howdesbt_tmp_dir, "filtered.txt")
filtered_genomes = list()
# Compute the theta distance between all the input genomes
bfdistance_theta = bfaction(
genomes, howdesbt_tmp_dir, kmer_len, filter_size=filter_size, nproc=nproc, action="bfdistance", mode="theta"
)
# Pair-wise comparison of input genomes
for i in range(len(genomes)):
for j in range(i + 1, len(genomes)):
# Get genome file names
_, genome1, _, _ = get_file_info(genomes[i])
_, genome2, _, _ = get_file_info(genomes[j])
excluded = None
if bfdistance_theta[genome1][genome2] >= similarity:
filtered_genomes.append(genomes[i])
excluded = genomes[i]
if bfdistance_theta[genome2][genome1] >= similarity:
filtered_genomes.append(genomes[j])
excluded = genomes[j]
if excluded:
                # Also take note of the excluded genomes in the filtered file
with open(filtered_genomes_filepath, "a+") as f:
f.write("{}\n".format(excluded))
break
# Redefine the list of genomes by removing the filtered ones
genomes = list(set(genomes).difference(set(filtered_genomes)))
return genomes
def download(
url: Optional[str] = None,
urls: Optional[List[str]] = None,
folder: str = os.getcwd(),
retries: int = 10,
raise_exception: bool = True
) -> Optional[Union[str, List[str]]]:
"""
Download a file from URL to the specified folder
:param url: Source file URL
:param urls: List with source file URLs
:param folder: Target destination folder path
:param retries: Try downloading again in case of errors
:param raise_exception: Raise an exception in case of error
:return: Path or list of paths to the downloaded files
"""
if not url and not urls:
raise ValueError("No URLs provided")
# Check whether the destination folder path exists
if not os.path.isdir(folder):
os.makedirs(folder, exist_ok=True)
try:
if url:
# Download file from URL to the destination folder
run(
["wget", "-N", url, "-P", folder],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
retries=retries,
)
elif urls:
with tempfile.NamedTemporaryFile() as tmpfile:
# Dump the list of bloom filter file paths
with open(tmpfile.name, "wt") as tmpfile_list:
for url in urls:
tmpfile_list.write("{}\n".format(url))
# Download a list of files from URL
run(
["wget", "-N", "-i", tmpfile.name, "-P", folder],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
retries=retries,
)
except Exception as e:
if raise_exception:
raise Exception(
"An error has occurred while trying to download {}".format(url)
).with_traceback(e.__traceback__)
        # This file does not seem really important after all
        return None
    if urls:
        # Return the paths to all the downloaded files
        return [os.path.join(folder, u.split("/")[-1]) for u in urls]
    # URLs always use "/" as a separator, regardless of the OS
    return os.path.join(folder, url.split("/")[-1])
def estimate_bf_size(
    genomes: List[str],
kmer_len: int = 21,
min_occurrences: int = 2,
prefix: str = "genomes",
tmp_dir: str = os.getcwd(),
nproc: int = 1
) -> int:
"""
Estimate the bloom filter size with ntCard
:param genomes: List of paths to the genome files
:param kmer_len: Length of the kmers
:param min_occurrences: Exclude kmers with a number of occurrences less than this param
:param prefix: Prefix of the output histogram file
:param tmp_dir: Path to the temporary folder
:param nproc: Make it parallel
:return: The estimated bloom filter size
"""
os.makedirs(tmp_dir, exist_ok=True)
with tempfile.NamedTemporaryFile() as genomes_file:
# Dump the list of genome file paths
with open(genomes_file.name, "wt") as gfile:
for filepath in genomes:
gfile.write("{}\n".format(filepath))
# Estimate the bloom filter size with ntCard
run(
[
"ntcard",
"--kmer={}".format(kmer_len),
"--threads={}".format(nproc),
"--pref={}".format(os.path.join(tmp_dir, prefix)),
"@{}".format(genomes_file.name),
],
silence=True,
)
# Total number of kmers in the reads
F1 = 0
# Number of distinct kmers
F0 = 0
# List with the number of kmers occurring less than min_occurrences
fs = list()
# Read the ntCard output hist file
hist_filepath = os.path.join(tmp_dir, "{}_k{}.hist".format(prefix, kmer_len))
with open(hist_filepath) as histfile:
for line in histfile:
line = line.strip()
if line:
line_split = line.split()
if line_split[0] == "F1":
F1 = int(line_split[-1])
elif line_split[0] == "F0":
F0 = int(line_split[-1])
                elif line_split[0].isdigit():
if int(line_split[0]) < min_occurrences:
fs.append(int(line_split[-1]))
else:
break
if F0 == 0:
# This could happen in case of a single very small genome
# Use F1 as the bloom filter size in this case
if F1 == 0:
raise Exception("Unable to estimate the bloom filter size: {}".format(hist_filepath))
return F1
# Estimate the bloom filter size
return F0 - sum(fs)
def filter_checkm_tables(
checkm_tables: List[str], completeness: float = 0.0, contamination: float = 100.0
) -> List[str]:
"""
Filter genomes according to completeness and contamination criteria
:param checkm_tables: List of paths to the CheckM output tables
:param completeness: Minimum allowed completeness
:param contamination: Maximum allowed contamination
:return: The list of genomes that passed the quality-control criteria
"""
# Define the list of genomes that passed the quality control
genomes = list()
# Iterate over the CheckM output tables
for filepath in checkm_tables:
if os.path.isfile(filepath):
with open(filepath) as table:
line_count = 0
for line in table:
line = line.strip()
if line:
# Always skip the first header line
if line_count > 0:
line_split = line.split("\t")
# Check whether the current genome respect both the completeness and contamination criteria
if float(line_split[-3]) >= completeness and float(line_split[-2]) <= contamination:
genomes.append(line_split[0])
line_count += 1
return genomes
def get_bf_density(filepath: str) -> float:
"""
Retrieve the bloom filter density
:param filepath: Path to the bloom filter file
:return: Density of the bloom filter
"""
density = 0.0
with tempfile.NamedTemporaryFile() as dumpbf:
# Retrieve bloom filter density
run(
[
"howdesbt",
"dumpbf",
filepath,
"--show:density",
],
stdout=dumpbf,
stderr=dumpbf,
)
try:
# Get the result
density = float(open(dumpbf.name, "rt").readline().strip().split(" ")[-1])
except Exception as ex:
raise Exception("An error has occurred while retrieving bloom filter density:\n{}".format(filepath)).with_traceback(
ex.__traceback__
)
return density
def get_boundaries(
bfs: List[str], tmpdir: str, kmer_len: int, filter_size: Optional[int] = None, nproc: int = 1
) -> Tuple[int, int, int]:
"""
Return kmers boundaries for a specific set of genomes defined as the minimum and
maximum number of common kmers among all the genomes in the current taxonomic level
    :param bfs: List with paths to the bloom filter representations of the genomes
:param tmpdir: Path to the temporary folder
:param kmer_len: Kmer length
:param filter_size: Bloom filter size
:param nproc: Make it parallel
:return: Return a tuple with boundaries
Total number, minimum, and maximum amount of kmers in common among the input genomes
"""
# Search for the minimum and maximum number of common kmers among all the input genomes
kmers = 0
    minv = np.inf
maxv = 0
# Compute the number of kmers in common between all pairs of genomes
bfdistance_intersect = bfaction(
bfs, tmpdir, kmer_len, filter_size=filter_size, nproc=nproc, action="bfdistance", mode="intersect"
)
# Iterate over the bloom filters
for i in range(len(bfs)):
for j in range(i + 1, len(bfs)):
# Get genome file names
_, genome1, _, _ = get_file_info(bfs[i])
_, genome2, _, _ = get_file_info(bfs[j])
# Result is under key "result"
# It returns a float by default
common = int(bfdistance_intersect[genome1][genome2])
if common == 0:
# This could be only due to a wrong classification and must be reported
with open(os.path.join(tmpdir, "zero_common_kmers.tsv"), "a+") as zck:
zck.write("{}\t{}\n".format(genome1, genome2))
# Pass to the next comparison
continue
# Update the minimum and maximum number of common kmers
if common > maxv:
maxv = common
if common < minv:
minv = common
# Use bfoperate --or (union) to retrieve the total number of kmers
bfoperate_or = bfaction(bfs, tmpdir, kmer_len, filter_size=filter_size, nproc=nproc, action="bfoperate", mode="or")
# Result is under the key "result"
kmers = bfoperate_or["result"]
return kmers, minv, maxv
def get_file_info(filepath: str, check_supported: bool = True, check_exists: bool = True) -> Tuple[str, str, str, str]:
"""
Get file path, name, extension, and compression
    :param filepath: Path to the input file
    :param check_supported: Raise an exception if the file extension is not supported
    :param check_exists: Raise an exception if the file does not exist
    :return: File path, name, extension, and compression
"""
if check_exists and not os.path.isfile(filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filepath)
# Trim the folder path out
basename = os.path.basename(filepath)
# Split the basename
filename, extension = os.path.splitext(basename)
# Take track of the extension in case of compression
compression = None
# Check whether it is compressed
if extension in COMPRESSED_FILES:
compression = extension
filename, extension = os.path.splitext(filename)
# Check whether the input file is supported
if check_supported and extension not in UNCOMPRESSED_FILES:
raise Exception("Unrecognized input file")
# Retrieve the absolute path to the file folder
absdir = os.path.abspath(os.path.dirname(filepath))
return absdir, filename, extension, compression
def get_level_boundaries(boundaries: Dict[str, Dict[str, Union[int, float]]], taxonomy: str) -> Tuple[int, int]:
"""
Retrieve boundaries for a given taxonomic label
:param boundaries: Boundaries table produced by the boundaries module
:param taxonomy: Taxonomic label
:return: Taxonomy-specific boundaries
"""
minv = 0
maxv = 0
# Keep track of the min and max common kmers
min_bounds = list()
max_bounds = list()
# Try searching for boundaries again with a redefined taxonomic label
retry = False
while minv == 0 and maxv == 0:
taxonomic_boundaries = dict()
if not retry:
if taxonomy in boundaries:
# Exact search of the current taxonomic label in boundaries
taxonomic_boundaries[taxonomy] = boundaries[taxonomy]
else:
for tax in boundaries:
if tax.startswith("{}|".format(taxonomy)):
# Expand the search to all the taxonomies with a common prefix
taxonomic_boundaries[tax] = boundaries[tax]
if taxonomic_boundaries:
# In case the current taxonomy is in the boundaries file
for tax in taxonomic_boundaries:
min_bounds.append(taxonomic_boundaries[tax]["min_kmers"])
max_bounds.append(taxonomic_boundaries[tax]["max_kmers"])
minv = int(sum(min_bounds) / len(min_bounds))
maxv = int(sum(max_bounds) / len(max_bounds))
else:
# Split the taxonomic label into levels
taxonomic_levels = taxonomy.split("|")
if len(taxonomic_levels) == 1:
# Get out of the loop of there are no other levels available
break
# Redefine the taxonomic label
taxonomy = "|".join(taxonomic_levels[:-1])
# Retry
retry = True
return minv, maxv
def howdesbt(
level_dir: str,
extension: str = "fna.gz",
kmer_len: int = 21,
min_occurrences: int = 2,
filter_size: int = 10000,
nproc: int = 1,
flat_structure: bool = False,
) -> None:
"""
Run HowDeSBT on a specific taxonomic level
Genomes must be in the "genomes" folder under level_dir
:param level_dir: Path to the taxonomic level folder
:param extension: Input file extension
:param kmer_len: Length of the kmers
:param min_occurrences: Exclude kmers with a number of occurrences less than this param
:param filter_size: Size of the bloom filters
:param nproc: Make it parallel
:param flat_structure: Genomes are not taxonomically organized
"""
# Check whether the input folder exists
if not os.path.isdir(level_dir):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), level_dir)
# Extract the level name from the level folder path
level_name = os.path.basename(level_dir)
# Define the index folder
index_dir = os.path.join(level_dir, "index")
if os.path.isdir(index_dir):
# Remove old index folder if any
shutil.rmtree(index_dir, ignore_errors=True)
# Define the path to the file with the list of genome under the current taxonomic level
level_list = os.path.join(level_dir, "{}.txt".format(level_name))
if os.path.isfile(level_list):
os.unlink(level_list)
# Define the path to the bloom filter representation of the current taxonomic level
level_filter = os.path.join(level_dir, "{}.bf".format(level_name))
if os.path.isfile(level_filter):
os.unlink(level_filter)
# Define the log file
howdesbt_log_filepath = os.path.join(level_dir, "howdesbt.log")
howdesbt_log = open(howdesbt_log_filepath, "w+")
# Take track of how many genomes under the specific taxonomic levels
how_many = 0
if os.path.basename(level_dir).startswith("s__") or flat_structure:
# Search for all the genomes under the current taxonomic level
genomes_folder = os.path.join(level_dir, "genomes")
if os.path.isdir(genomes_folder):
# Create the filters folder
filters_dir = os.path.join(os.path.dirname(genomes_folder), "filters")
os.makedirs(filters_dir, exist_ok=True)
# Iterate over the genome files
for genome_path in Path(genomes_folder).glob("*.{}".format(extension)):
# Retrieve genome file info
_, genome_name, genome_extension, genome_compression = get_file_info(genome_path)
# Define the path to the bloom filter representation of the genome
bf_filepath = os.path.join(filters_dir, "{}.bf".format(genome_name))
if not os.path.isfile(bf_filepath):
# Define the uncompressed genome path
genome_file = os.path.join(genomes_folder, "{}{}".format(genome_name, genome_extension))
if genome_compression:
# Uncompress the genome file
with open(genome_file, "w+") as file:
run(["gzip", "-dc", genome_path], stdout=file, stderr=file)
# Build the bloom filter file from the current genome
run(
[
"howdesbt",
"makebf",
"--k={}".format(kmer_len),
"--min={}".format(min_occurrences),
"--bits={}".format(filter_size),
"--hashes=1",
"--seed=0,0",
genome_file,
"--out={}".format(bf_filepath),
"--threads={}".format(nproc),
],
stdout=howdesbt_log,
stderr=howdesbt_log,
)
if genome_compression:
# Get rid of the uncompressed genome file
os.unlink(genome_file)
filters_folder = os.path.join(level_dir, "filters")
if os.path.isdir(filters_folder):
# Take track of the bloom filter files
with open(level_list, "w+") as level_list_file:
for bf_filepath in Path(filters_folder).glob("*.bf"):
level_list_file.write("{}\n".format(bf_filepath))
# Increment the genomes counter
how_many += 1
else:
# Find all the other taxonomic levels
# Define the new list of bloom filters
for level in os.listdir(level_dir):
if os.path.isdir(os.path.join(level_dir, level)):
                # Define the path to the bloom filter file
bf_filepath = os.path.join(level_dir, level, "{}.bf".format(level))
if os.path.isfile(bf_filepath):
with open(level_list, "a+") as level_list_file:
level_list_file.write("{}\n".format(bf_filepath))
# Increment the genomes counter
how_many += 1
# Build the index folder
os.makedirs(index_dir, exist_ok=True)
# Move to the index folder
# This will force howdesbt to build the compressed nodes into the index folder
os.chdir(index_dir)
if how_many > 1:
# Create the tree topology file
run(
[
"howdesbt",
"cluster",
"--list={}".format(level_list),
"--bits={}".format(filter_size),
"--tree={}".format(os.path.join(index_dir, "union.sbt")),
"--nodename={}".format(os.path.join(index_dir, "node{number}")),
"--keepallnodes",
],
stdout=howdesbt_log,
stderr=howdesbt_log,
)
else:
# With only one bloom filter it does not make sense to cluster genomes
bf_filepath = [line.strip() for line in open(level_list).readlines() if line.strip()][0]
# There is only one line which refers to the only bloom filter file
shutil.copy(bf_filepath, os.path.join(level_dir, "{}.bf".format(level_name)))
# Manually define the union.sbt file with the single node
with open(os.path.join(index_dir, "union.sbt"), "w+") as union:
union.write("{}\n".format(bf_filepath))
# Build all the bloom filter files
run(
[
"howdesbt",
"build",
"--howde",
"--tree={}".format(os.path.join(index_dir, "union.sbt")),
"--outtree={}".format(os.path.join(index_dir, "index.detbrief.sbt")),
],
stdout=howdesbt_log,
stderr=howdesbt_log,
)
# Remove the union.sbt file
os.unlink(os.path.join(index_dir, "union.sbt"))
# Fix node paths in the final index.detbrief.sbt file
with open(os.path.join(index_dir, "index.full.detbrief.sbt"), "w+") as file1:
with open(os.path.join(index_dir, "index.detbrief.sbt")) as file2:
for line in file2:
line = line.strip()
if line:
# Define the depth of the node in the tree
stars = line.count("*")
# Remove the stars to retrieve the node name
node_name = line[stars:]
# Define the absolute path to the node bloom filter file
node_path = os.path.join(index_dir, node_name)
# Define the new node in the tree
file1.write("{}{}\n".format("*" * stars, node_path))
# Get rid of the old tree
os.unlink(os.path.join(index_dir, "index.detbrief.sbt"))
# Rename the new tree
shutil.move(
os.path.join(index_dir, "index.full.detbrief.sbt"),
os.path.join(index_dir, "index.detbrief.sbt"),
)
if how_many > 1:
# Build the bloom filter representation of the current taxonomic level
bf_filepath = os.path.join(level_dir, "{}.bf".format(level_name))
# Merge all the leaves together by applying the OR logic operator on the bloom filter files
# The resulting bloom filter is the representative one, which is the same as the root node of the tree
run(
[
"howdesbt",
"bfoperate",
"--list={}".format(level_list),
"--or",
"--out={}".format(bf_filepath),
],
stdout=howdesbt_log,
stderr=howdesbt_log,
)
# Close the log file handler
howdesbt_log.close()
def init_logger(filepath: Optional[str] = None, toolid: Optional[str] = None, verbose: bool = True) -> Optional[Logger]:
"""
Define a logger to print on console, on file, or both
    :param filepath: Path to the log file
    :param toolid: Tool identifier reported in every log record
    :param verbose: Print on screen
:return: Logger object or None
"""
# Define the logger config
# TODO configure other logging levels (i.e., NOTSET, DEBUG, INFO, WARN, ERROR, and CRITICAL)
logging_config: Dict[str, Any] = dict(
version=1,
formatters={
"verbose": {
"format": "[%(toolid)s][%(levelname)s][%(asctime)s] %(message)s",
"datefmt": "%d/%b/%Y %H:%M:%S",
}
},
handlers={
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "verbose",
"stream": sys.stdout,
},
"file": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "verbose",
"filename": os.devnull,
"maxBytes": 52428800,
"backupCount": 7,
},
},
loggers={
"console": {"handlers": ["console"], "level": logging.INFO},
"file": {"handlers": ["file"], "level": logging.INFO},
"full": {"handlers": ["console", "file"], "level": logging.INFO},
},
)
# In case of log file
if filepath:
# Check whether its folder exists
log_dir = os.path.dirname(filepath)
if not os.path.isdir(log_dir):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), log_dir)
# Update the log file path in the config dictionary
logging_config["handlers"]["file"]["filename"] = filepath
# Load the logging config
dictConfig(logging_config)
# Get the record factory
factory = logging.getLogRecordFactory()
# Customise the record_factory function to add the toolid attribute
def record_factory(*args, **kwargs):
record = factory(*args, **kwargs)
record.toolid = toolid
return record
# Register the new record factory
logging.setLogRecordFactory(record_factory)
# Define the logger type
logtype = None
if filepath and verbose:
# Full logger will print on screen and on the log file
logtype = "full"
elif filepath and not verbose:
# File logger will only print on the log file
logtype = "file"
elif not filepath and verbose:
# Console logger will only print message on the screen
logtype = "console"
if logtype:
# Define and return the logger object
logger = logging.getLogger(logtype)
return logger
# In case no file path and verbose have been specified
return None
def integrity_check(filepath) -> bool:
"""
This is for Gzipped files only
:param filepath: Path to the Gzipped file
:return: True if it passes the integrity check
"""
# This checks whether the input file exists and its extension and compression are supported
_, _, _, compression = get_file_info(filepath, check_supported=True, check_exists=True)
if compression != ".gz":
# Limit the compression to Gzipped files only
raise Exception("Unsupported file type")
try:
# It always throws an Exception in case of a return value > 0
run(["gzip", "-t", filepath], silence=True)
except Exception:
return False
return True
def load_boundaries(boundaries_filepath: str) -> Dict[str, Dict[str, Union[int, float]]]:
"""
Load the table produced by the boundaries module
:param boundaries_filepath: Path to the boundaries table
:return: Dictionary with the table content indexed by taxa
"""
if not os.path.isfile(boundaries_filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), boundaries_filepath)
boundaries = dict()
with open(boundaries_filepath) as table:
for line in table:
line = line.strip()
if line:
if not line.startswith("#"):
line_split = line.split("\t")
# Indexed by taxonomic labels
boundaries[line_split[0]] = {
"clusters": int(line_split[1]),
"references": int(line_split[2]),
"all_kmers": int(line_split[3]),
"min_kmers": int(line_split[4]),
"max_kmers": int(line_split[5]),
"min_score": float(line_split[6]),
"max_score": float(line_split[7]),
}
return boundaries
def load_input_table(filepath: str, input_extension: str = "fna.gz") -> Dict[str, List[str]]:
"""
Load the input table with the list of paths to the genome files and eventually their taxonomic labels
:param filepath: Path to the input file
:param input_extension: Input genome files extension
    :return: A dictionary mapping taxonomic labels ("NA" in case of MAGs with no
             taxonomy) to the lists of paths to their genome files
"""
if not os.path.isfile(filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filepath)
genome2taxonomy: Dict[str, str] = dict()
with open(filepath) as input_file:
for line in input_file:
line = line.strip()
if line:
if not line.startswith("#"):
line_split = line.split("\t")
taxonomy = "NA"
if len(line_split) > 2:
raise Exception("Malformed input file! It must contain two columns at most")
elif len(line_split) == 2:
taxonomy = line_split[1]
if len(taxonomy.split("|")) != 7:
# Taxonomic labels must have 7 levels
raise Exception(
"Invalid taxonomic label! Please note that taxonomies must have 7 levels:\n{}".format(
line_split[1]
)
)
# This automatically check whether extension and compression are supported
dirpath, genome_name, extension, compression = get_file_info(line_split[0])
if not line_split[0].endswith(".{}".format(input_extension)):
raise Exception(
"Unexpected input file extension! "
"File: {}; Expected extension: {}".format(line_split[0], input_extension)
)
genome_path = os.path.join(dirpath, "{}{}{}".format(
genome_name, extension, compression if compression else ""
))
if genome_path in genome2taxonomy:
if genome2taxonomy[genome_path] != taxonomy:
raise Exception(
"Genome \"{}\" appears twice in the input file with two different taxonomic labels:\n{}\n{}".format(
genome_name, genome2taxonomy[genome_path], taxonomy
)
)
genome2taxonomy[genome_path] = taxonomy
taxonomy2genomes: Dict[str, List[str]] = dict()
if genome2taxonomy:
for genome_path in genome2taxonomy:
if genome2taxonomy[genome_path] not in taxonomy2genomes:
taxonomy2genomes[genome2taxonomy[genome_path]] = list()
taxonomy2genomes[genome2taxonomy[genome_path]].append(genome_path)
return taxonomy2genomes
def load_manifest(manifest_filepath: str) -> Dict[str, Union[str, int, float]]:
"""
Load the manifest file
:param manifest_filepath: Path to the manifest file
:return: Dictionary with manifest data
"""
if not os.path.isfile(manifest_filepath):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), manifest_filepath)
manifest = dict()
with open(manifest_filepath) as file:
for line in file:
line = line.strip()
if line:
line_split = line.split(" ")
# e.g., key: --kmer-len > kmer_len
key = line_split[0][2:].replace("-", "_")
try:
# Try to cast values to the appropriate type
manifest[key] = literal_eval(line_split[1])
except Exception:
# Otherwise, maintain value as string
manifest[key] = line_split[1]
return manifest
def number(
typev: type,
minv: Optional[Union[int, float]] = None,
maxv: Optional[Union[int, float]] = None,
) -> Callable:
"""
Take full control of input numeric types by defining custom intervals
"""
def type_func(value: Union[int, float]) -> Union[int, float]:
"""
Test data type and ranges on the input value
"""
try:
value = typev(value)
            if minv is not None and value < minv:
                raise ap.ArgumentTypeError("Minimum value is {}".format(minv))
            if maxv is not None and value > maxv:
                raise ap.ArgumentTypeError("Maximum value is {}".format(maxv))
return value
except Exception as e:
raise ap.ArgumentTypeError("Input value must be {}".format(typev)).with_traceback(e.__traceback__)
return type_func
def optimal_k(
genomes: List[str],
kl: int,
tmpdir: str,
closely_related: bool = False,
nproc: int = 1,
threads: int = 1
) -> int:
"""
Given a set of genomes, try to define the best k-mer length with kitsune
:param genomes: List with genome file paths (Gzip compressed or not)
:param kl: kitsune tests different k-mer lengths, starting from k=4 up to kl
:param tmpdir: Path to the temporary folder
    :param closely_related: Use this flag for closely related genomes
:param nproc: Max number of processes
:param threads: Max number of threads
:return: Optimal k-mer length
"""
if len(genomes) < 2:
raise Exception("Not enough genomes")
if kl < 4:
raise ValueError("Initial k-mer length is too small")
# Check whether the destination folder path exists
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir, exist_ok=True)
# Take track of the genome file paths in the tmp folder
genomes_paths = list()
for genome_path in genomes:
_, genome_name, extension, compression = get_file_info(genome_path, check_supported=True, check_exists=True)
# Define the uncompressed genome path
genome_file = os.path.join(tmpdir, "{}{}".format(genome_name, extension))
if not compression:
# Make a symbolic link in case of an uncompressed file
os.symlink(genome_path, genome_file)
else:
# Uncompress the genome file
# It can always be Gzip compressed here
with open(genome_file, "w+") as file:
run(["gzip", "-dc", genome_path], stdout=file, stderr=file)
genomes_paths.append(genome_file)
if not genomes_paths:
raise Exception("Not enough genomes. Something went wrong while processing your genomes")
with tempfile.NamedTemporaryFile() as inputlist, tempfile.NamedTemporaryFile() as outres:
# Dump the list of bloom filter file paths
with open(inputlist.name, "wt") as inputlist_file:
for filepath in genomes_paths:
inputlist_file.write("{}\n".format(filepath))
# Run kitsune
# This may take a while and a considerable amount of computational resources
        kitsune_cmd = [
            "kitsune",
            "kopt",
            "--filenames",
            inputlist.name,
            "--k-max",
            kl,
            "--canonical",
            "--fast",
            "--nproc",
            nproc,
            "--threads",
            threads,
            "--in-memory",
            "--output",
            outres.name,
        ]
        if closely_related:
            # Add the flag only when requested instead of passing an empty argument
            kitsune_cmd.append("--closely_related")
        run(kitsune_cmd, silence=True)
# Get kitsune output message
out_content = open(outres.name).read().strip()
try:
# Try to retrieve the optimal k
return int(out_content.split(" ")[-1])
except Exception as ex:
raise Exception("An error has occurred while running kitsune kopt:\n{}".format(out_content)).with_traceback(
ex.__traceback__
)
def println(message: str, logger: Optional[Logger] = None, verbose: bool = True) -> None:
"""
Send messages to the logger
It will print messages on screen, send messages to the log file, or both
:param message: Custom message
:param logger: Logger object
    :param verbose: Print messages on screen if True and logger is None
"""
if logger:
# Redirect messages to the logger
logger.info(message)
elif verbose:
# In case the logger is not defined
# Redirect messages to the screen
print(message)
def run(
cmdline: List[Union[str, int, float]],
stdout: Union[int, TextIO] = sys.stdout,
stderr: Union[int, TextIO] = sys.stderr,
silence: bool = False,
extended_error: bool = False,
retries: int = 1,
) -> None:
"""
Wrapper for the subprocess.check_call function
:param cmdline: Command line list
:param stdout: Standard output
:param stderr: Standard error
:param silence: Redirect stdout and stderr to /dev/null
:param extended_error: Raise errors with traceback in case of unexpected exceptions
:param retries: Try running the process again in case of errors
"""
    # Check whether there is something to run
if cmdline:
while retries > 0:
try:
# Cast everything to string in cmdline
cmdline = [str(cmd) for cmd in cmdline]
# In case of silence
if silence:
# Redirect the stdout and stderr to /dev/null
stdout = subprocess.DEVNULL
stderr = subprocess.DEVNULL
# Run a specific command line and redirect the stdout and stderr
# to those specified in input
subprocess.check_call(cmdline, stdout=stdout, stderr=stderr)
# At this point, the execution of the command did not raise any exception
# Set retries to 0
retries = 0
except subprocess.CalledProcessError as e:
if retries == 1:
# Define the error message
error_message = "\nAn error has occurred while running the following command:\n{}\n\n".format(
" ".join(cmdline)
)
if extended_error:
# Extend the error message
error_message += (
"If you think this is a bug and need support, please open an Issue or a new Discussion on the official GitHub repository.\n"
"We would be happy to answer your questions and help you troubleshoot any kind of issue with our framework.\n"
)
raise Exception(error_message).with_traceback(e.__traceback__)
# Try again
retries -= 1
else:
# There is nothing to run
raise Exception("Empty command line!")
def validate_url(url: str) -> bool:
"""
Validate a URL
:param url: Input URL to be validated
:return: True if validated, False otherwise
"""
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain
r'localhost|' # localhost
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE
)
return re.match(regex, url) is not None | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/en-gb.js |
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
var enGb = moment.defineLocale('en-gb', {
months: 'January_February_March_April_May_June_July_August_September_October_November_December'.split(
'_'
),
monthsShort: 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_'),
weekdays: 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split(
'_'
),
weekdaysShort: 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_'),
weekdaysMin: 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_'),
longDateFormat: {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L: 'DD/MM/YYYY',
LL: 'D MMMM YYYY',
LLL: 'D MMMM YYYY HH:mm',
LLLL: 'dddd, D MMMM YYYY HH:mm',
},
calendar: {
sameDay: '[Today at] LT',
nextDay: '[Tomorrow at] LT',
nextWeek: 'dddd [at] LT',
lastDay: '[Yesterday at] LT',
lastWeek: '[Last] dddd [at] LT',
sameElse: 'L',
},
relativeTime: {
future: 'in %s',
past: '%s ago',
s: 'a few seconds',
ss: '%d seconds',
m: 'a minute',
mm: '%d minutes',
h: 'an hour',
hh: '%d hours',
d: 'a day',
dd: '%d days',
M: 'a month',
MM: '%d months',
y: 'a year',
yy: '%d years',
},
dayOfMonthOrdinalParse: /\d{1,2}(st|nd|rd|th)/,
ordinal: function (number) {
var b = number % 10,
output =
~~((number % 100) / 10) === 1
? 'th'
: b === 1
? 'st'
: b === 2
? 'nd'
: b === 3
? 'rd'
: 'th';
return number + output;
},
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
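// Illustrative note (not part of the locale file): with this configuration,
// e.g. moment('2020-03-01').locale('en-gb').format('Do MMMM YYYY') renders as
// "1st March 2020"; ordinal() maps numbers ending in 1/2/3 to st/nd/rd, except
// the 11-13 range (and every other number), which gets "th".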
return enGb;
}))); | PypiClean |
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/addon/selection/mark-selection.js |
// Because sometimes you need to mark the selected *text*.
//
// Adds an option 'styleSelectedText' which, when enabled, gives
// selected text the CSS class given as option value, or
// "CodeMirror-selectedtext" when the value is not a string.
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineOption("styleSelectedText", false, function(cm, val, old) {
var prev = old && old != CodeMirror.Init;
if (val && !prev) {
cm.state.markedSelection = [];
cm.state.markedSelectionStyle = typeof val == "string" ? val : "CodeMirror-selectedtext";
reset(cm);
cm.on("cursorActivity", onCursorActivity);
cm.on("change", onChange);
} else if (!val && prev) {
cm.off("cursorActivity", onCursorActivity);
cm.off("change", onChange);
clear(cm);
cm.state.markedSelection = cm.state.markedSelectionStyle = null;
}
});
function onCursorActivity(cm) {
cm.operation(function() { update(cm); });
}
function onChange(cm) {
if (cm.state.markedSelection.length)
cm.operation(function() { clear(cm); });
}
var CHUNK_SIZE = 8;
var Pos = CodeMirror.Pos;
var cmp = CodeMirror.cmpPos;
function coverRange(cm, from, to, addAt) {
if (cmp(from, to) == 0) return;
var array = cm.state.markedSelection;
var cls = cm.state.markedSelectionStyle;
for (var line = from.line;;) {
var start = line == from.line ? from : Pos(line, 0);
var endLine = line + CHUNK_SIZE, atEnd = endLine >= to.line;
var end = atEnd ? to : Pos(endLine, 0);
var mark = cm.markText(start, end, {className: cls});
if (addAt == null) array.push(mark);
else array.splice(addAt++, 0, mark);
if (atEnd) break;
line = endLine;
}
}
function clear(cm) {
var array = cm.state.markedSelection;
for (var i = 0; i < array.length; ++i) array[i].clear();
array.length = 0;
}
function reset(cm) {
clear(cm);
var ranges = cm.listSelections();
for (var i = 0; i < ranges.length; i++)
coverRange(cm, ranges[i].from(), ranges[i].to());
}
function update(cm) {
if (!cm.somethingSelected()) return clear(cm);
if (cm.listSelections().length > 1) return reset(cm);
var from = cm.getCursor("start"), to = cm.getCursor("end");
var array = cm.state.markedSelection;
if (!array.length) return coverRange(cm, from, to);
var coverStart = array[0].find(), coverEnd = array[array.length - 1].find();
if (!coverStart || !coverEnd || to.line - from.line < CHUNK_SIZE ||
cmp(from, coverEnd.to) >= 0 || cmp(to, coverStart.from) <= 0)
return reset(cm);
while (cmp(from, coverStart.from) > 0) {
array.shift().clear();
coverStart = array[0].find();
}
if (cmp(from, coverStart.from) < 0) {
if (coverStart.to.line - from.line < CHUNK_SIZE) {
array.shift().clear();
coverRange(cm, from, coverStart.to, 0);
} else {
coverRange(cm, from, coverStart.from, 0);
}
}
while (cmp(to, coverEnd.to) < 0) {
array.pop().clear();
coverEnd = array[array.length - 1].find();
}
if (cmp(to, coverEnd.to) > 0) {
if (to.line - coverEnd.from.line < CHUNK_SIZE) {
array.pop().clear();
coverRange(cm, coverEnd.from, to);
} else {
coverRange(cm, coverEnd.to, to);
}
}
}
}); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/form/_AutoCompleterMixin.js.uncompressed.js | define("dijit/form/_AutoCompleterMixin", [
"dojo/_base/connect", // keys keys.SHIFT
"dojo/data/util/filter", // patternToRegExp
"dojo/_base/declare", // declare
"dojo/_base/Deferred", // Deferred.when
"dojo/dom-attr", // domAttr.get
"dojo/_base/event", // event.stop
"dojo/keys",
"dojo/_base/lang", // lang.clone lang.hitch
"dojo/query", // query
"dojo/regexp", // regexp.escapeString
"dojo/_base/sniff", // has("ie")
"dojo/string", // string.substitute
"dojo/_base/window", // win.doc.selection.createRange
"./DataList",
"../registry", // registry.byId
"./_TextBoxMixin" // defines _TextBoxMixin.selectInputText
], function(connect, filter, declare, Deferred, domAttr, event, keys, lang, query, regexp, has, string, win,
DataList, registry, _TextBoxMixin){
// module:
// dijit/form/_AutoCompleterMixin
// summary:
// A mixin that implements the base functionality for `dijit.form.ComboBox`/`dijit.form.FilteringSelect`
return declare("dijit.form._AutoCompleterMixin", null, {
// summary:
// A mixin that implements the base functionality for `dijit.form.ComboBox`/`dijit.form.FilteringSelect`
// description:
// All widgets that mix in dijit.form._AutoCompleterMixin must extend `dijit.form._FormValueWidget`.
// tags:
// protected
// item: Object
// This is the item returned by the dojo.data.store implementation that
// provides the data for this ComboBox, it's the currently selected item.
item: null,
// pageSize: Integer
// Argument to data provider.
// Specifies number of search results per page (before hitting "next" button)
pageSize: Infinity,
// store: [const] dojo.store.api.Store
// Reference to data provider object used by this ComboBox
store: null,
// fetchProperties: Object
// Mixin to the store's fetch.
// For example, to set the sort order of the ComboBox menu, pass:
// | { sort: [{attribute:"name",descending: true}] }
// To override the default queryOptions so that deep=false, do:
// | { queryOptions: {ignoreCase: true, deep: false} }
fetchProperties:{},
// query: Object
// A query that can be passed to 'store' to initially filter the items,
// before doing further filtering based on `searchAttr` and the key.
// Any reference to the `searchAttr` is ignored.
query: {},
// autoComplete: Boolean
// If user types in a partial string, and then tab out of the `<input>` box,
// automatically copy the first entry displayed in the drop down list to
// the `<input>` field
autoComplete: true,
// highlightMatch: String
// One of: "first", "all" or "none".
//
// If the ComboBox/FilteringSelect opens with the search results and the searched
// string can be found, it will be highlighted. If set to "all"
// then will probably want to change `queryExpr` parameter to '*${0}*'
//
// Highlighting is only performed when `labelType` is "text", so as to not
// interfere with any HTML markup an HTML label might contain.
highlightMatch: "first",
// searchDelay: Integer
// Delay in milliseconds between when user types something and we start
// searching based on that value
searchDelay: 100,
// searchAttr: String
// Search for items in the data store where this attribute (in the item)
// matches what the user typed
searchAttr: "name",
// labelAttr: String?
// The entries in the drop down list come from this attribute in the
// dojo.data items.
// If not specified, the searchAttr attribute is used instead.
labelAttr: "",
// labelType: String
// Specifies how to interpret the labelAttr in the data store items.
// Can be "html" or "text".
labelType: "text",
// queryExpr: String
// This specifies what query ComboBox/FilteringSelect sends to the data store,
// based on what the user has typed. Changing this expression will modify
// whether the drop down shows only exact matches, a "starting with" match,
// etc. Use it in conjunction with highlightMatch.
// dojo.data query expression pattern.
// `${0}` will be substituted for the user text.
// `*` is used for wildcards.
// `${0}*` means "starts with", `*${0}*` means "contains", `${0}` means "is"
queryExpr: "${0}*",
// ignoreCase: Boolean
// Set true if the ComboBox/FilteringSelect should ignore case when matching possible items
ignoreCase: true,
// Flags to _HasDropDown to limit height of drop down to make it fit in viewport
maxHeight: -1,
// For backwards compatibility let onClick events propagate, even clicks on the down arrow button
_stopClickEvents: false,
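// Illustrative usage sketch (not part of this module): the options documented
// above are typically supplied when constructing a widget that mixes this in,
// for example a dijit/form/ComboBox backed by a dojo/store/Memory store; the
// data and node id below are made up.
//
//   require(["dijit/form/ComboBox", "dojo/store/Memory"],
//     function(ComboBox, Memory){
//       var stateStore = new Memory({data: [{name: "California", id: "CA"}]});
//       var box = new ComboBox({
//         store: stateStore,
//         searchAttr: "name",
//         queryExpr: "*${0}*",      // "contains" matching
//         highlightMatch: "all"
//       }, "stateNode");
//       box.startup();
//     });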
_getCaretPos: function(/*DomNode*/ element){
// khtml 3.5.2 has selection* methods as does webkit nightlies from 2005-06-22
var pos = 0;
if(typeof(element.selectionStart) == "number"){
// FIXME: this is totally borked on Moz < 1.3. Any recourse?
pos = element.selectionStart;
}else if(has("ie")){
// in the case of a mouse click in a popup being handled,
// then the win.doc.selection is not the textarea, but the popup
// var r = win.doc.selection.createRange();
// hack to get IE 6 to play nice. What a POS browser.
var tr = win.doc.selection.createRange().duplicate();
var ntr = element.createTextRange();
tr.move("character",0);
ntr.move("character",0);
try{
// If control doesn't have focus, you get an exception.
// Seems to happen on reverse-tab, but can also happen on tab (seems to be a race condition - only happens sometimes).
// There appears to be no workaround for this - googled for quite a while.
ntr.setEndPoint("EndToEnd", tr);
pos = String(ntr.text).replace(/\r/g,"").length;
}catch(e){
// If focus has shifted, 0 is fine for caret pos.
}
}
return pos;
},
_setCaretPos: function(/*DomNode*/ element, /*Number*/ location){
location = parseInt(location);
_TextBoxMixin.selectInputText(element, location, location);
},
_setDisabledAttr: function(/*Boolean*/ value){
// Additional code to set disabled state of ComboBox node.
// Overrides _FormValueWidget._setDisabledAttr() or ValidationTextBox._setDisabledAttr().
this.inherited(arguments);
this.domNode.setAttribute("aria-disabled", value);
},
_abortQuery: function(){
// stop in-progress query
if(this.searchTimer){
clearTimeout(this.searchTimer);
this.searchTimer = null;
}
if(this._fetchHandle){
if(this._fetchHandle.cancel){
this._cancelingQuery = true;
this._fetchHandle.cancel();
this._cancelingQuery = false;
}
this._fetchHandle = null;
}
},
_onInput: function(/*Event*/ evt){
// summary:
// Handles paste events
this.inherited(arguments);
if(evt.charOrCode == 229){ // IME or cut/paste event
this._onKey(evt);
}
},
_onKey: function(/*Event*/ evt){
// summary:
// Handles keyboard events
var key = evt.charOrCode;
// except for cutting/pasting case - ctrl + x/v
if(evt.altKey || ((evt.ctrlKey || evt.metaKey) && (key != 'x' && key != 'v')) || key == keys.SHIFT){
return; // throw out weird key combinations and spurious events
}
var doSearch = false;
var pw = this.dropDown;
var highlighted = null;
this._prev_key_backspace = false;
this._abortQuery();
// _HasDropDown will do some of the work:
// 1. when drop down is not yet shown:
// - if user presses the down arrow key, call loadDropDown()
// 2. when drop down is already displayed:
// - on ESC key, call closeDropDown()
// - otherwise, call dropDown.handleKey() to process the keystroke
this.inherited(arguments);
if(this._opened){
highlighted = pw.getHighlightedOption();
}
switch(key){
case keys.PAGE_DOWN:
case keys.DOWN_ARROW:
case keys.PAGE_UP:
case keys.UP_ARROW:
// Keystroke caused ComboBox_menu to move to a different item.
// Copy new item to <input> box.
if(this._opened){
this._announceOption(highlighted);
}
event.stop(evt);
break;
case keys.ENTER:
// prevent submitting form if user presses enter. Also
// prevent accepting the value if either Next or Previous
// are selected
if(highlighted){
// only stop event on prev/next
if(highlighted == pw.nextButton){
this._nextSearch(1);
event.stop(evt);
break;
}else if(highlighted == pw.previousButton){
this._nextSearch(-1);
event.stop(evt);
break;
}
}else{
// Update 'value' (ex: KY) according to currently displayed text
this._setBlurValue(); // set value if needed
this._setCaretPos(this.focusNode, this.focusNode.value.length); // move cursor to end and cancel highlighting
}
// default case:
// if enter pressed while drop down is open, or for FilteringSelect,
// if we are in the middle of a query to convert a directly typed in value to an item,
// prevent submit
if(this._opened || this._fetchHandle){
event.stop(evt);
}
// fall through
case keys.TAB:
var newvalue = this.get('displayedValue');
// if the user had More Choices selected fall into the
// _onBlur handler
if(pw && (
newvalue == pw._messages["previousMessage"] ||
newvalue == pw._messages["nextMessage"])
){
break;
}
if(highlighted){
this._selectOption(highlighted);
}
// fall through
case keys.ESCAPE:
if(this._opened){
this._lastQuery = null; // in case results come back later
this.closeDropDown();
}
break;
case ' ':
if(highlighted){
// user is effectively clicking a choice in the drop down menu
event.stop(evt);
this._selectOption(highlighted);
this.closeDropDown();
}else{
// user typed a space into the input box, treat as normal character
doSearch = true;
}
break;
case keys.DELETE:
case keys.BACKSPACE:
this._prev_key_backspace = true;
doSearch = true;
break;
default:
// Non char keys (F1-F12 etc..) shouldn't open list.
// Ascii characters and IME input (Chinese, Japanese etc.) should.
//IME input produces keycode == 229.
doSearch = typeof key == 'string' || key == 229;
}
if(doSearch){
// need to wait a tad before start search so that the event
// bubbles through DOM and we have value visible
this.item = undefined; // undefined means item needs to be set
this.searchTimer = setTimeout(lang.hitch(this, "_startSearchFromInput"),1);
}
},
_autoCompleteText: function(/*String*/ text){
// summary:
// Fill in the textbox with the first item from the drop down
// list, and highlight the characters that were
// auto-completed. For example, if user typed "CA" and the
// drop down list appeared, the textbox would be changed to
// "California" and "ifornia" would be highlighted.
var fn = this.focusNode;
// IE7: clear selection so next highlight works all the time
_TextBoxMixin.selectInputText(fn, fn.value.length);
// does text autoComplete the value in the textbox?
var caseFilter = this.ignoreCase? 'toLowerCase' : 'substr';
if(text[caseFilter](0).indexOf(this.focusNode.value[caseFilter](0)) == 0){
var cpos = this.autoComplete ? this._getCaretPos(fn) : fn.value.length;
// only try to extend if we added the last character at the end of the input
if((cpos+1) > fn.value.length){
// only add to input node as we would overwrite Capitalisation of chars
// actually, that is ok
fn.value = text;//.substr(cpos);
// visually highlight the autocompleted characters
_TextBoxMixin.selectInputText(fn, cpos);
}
}else{
// text does not autoComplete; replace the whole value and highlight
fn.value = text;
_TextBoxMixin.selectInputText(fn);
}
},
_openResultList: function(/*Object*/ results, /*Object*/ query, /*Object*/ options){
// summary:
// Callback when a search completes.
// description:
// 1. generates drop-down list and calls _showResultList() to display it
// 2. if this result list is from user pressing "more choices"/"previous choices"
// then tell screen reader to announce new option
this._fetchHandle = null;
if( this.disabled ||
this.readOnly ||
(query[this.searchAttr] !== this._lastQuery) // TODO: better way to avoid getting unwanted notify
){
return;
}
var wasSelected = this.dropDown.getHighlightedOption();
this.dropDown.clearResultList();
if(!results.length && options.start == 0){ // if no results and not just the previous choices button
this.closeDropDown();
return;
}
// Fill in the textbox with the first item from the drop down list,
// and highlight the characters that were auto-completed. For
// example, if user typed "CA" and the drop down list appeared, the
// textbox would be changed to "California" and "ifornia" would be
// highlighted.
var nodes = this.dropDown.createOptions(
results,
options,
lang.hitch(this, "_getMenuLabelFromItem")
);
// show our list (only if we have content, else nothing)
this._showResultList();
// #4091:
// tell the screen reader that the paging callback finished by
// shouting the next choice
if(options.direction){
if(1 == options.direction){
this.dropDown.highlightFirstOption();
}else if(-1 == options.direction){
this.dropDown.highlightLastOption();
}
if(wasSelected){
this._announceOption(this.dropDown.getHighlightedOption());
}
}else if(this.autoComplete && !this._prev_key_backspace
// when the user clicks the arrow button to show the full list,
// startSearch looks for "*".
// it does not make sense to autocomplete
// if they are just previewing the options available.
&& !/^[*]+$/.test(query[this.searchAttr].toString())){
this._announceOption(nodes[1]); // 1st real item
}
},
_showResultList: function(){
// summary:
// Display the drop down if not already displayed, or if it is displayed, then
// reposition it if necessary (reposition may be necessary if drop down's height changed).
this.closeDropDown(true);
this.openDropDown();
this.domNode.setAttribute("aria-expanded", "true");
},
loadDropDown: function(/*Function*/ /*===== callback =====*/){
// Overrides _HasDropDown.loadDropDown().
// This is called when user has pressed button icon or pressed the down arrow key
// to open the drop down.
this._startSearchAll();
},
isLoaded: function(){
// signal to _HasDropDown that it needs to call loadDropDown() to load the
// drop down asynchronously before displaying it
return false;
},
closeDropDown: function(){
// Overrides _HasDropDown.closeDropDown(). Closes the drop down (assuming that it's open).
// This method is the callback when the user types ESC or clicking
// the button icon while the drop down is open. It's also called by other code.
this._abortQuery();
if(this._opened){
this.inherited(arguments);
this.domNode.setAttribute("aria-expanded", "false");
this.focusNode.removeAttribute("aria-activedescendant");
}
},
_setBlurValue: function(){
// if the user clicks away from the textbox OR tabs away, set the
// value to the textbox value
// #4617:
// if value is now more choices or previous choices, revert
// the value
var newvalue = this.get('displayedValue');
var pw = this.dropDown;
if(pw && (
newvalue == pw._messages["previousMessage"] ||
newvalue == pw._messages["nextMessage"]
)
){
this._setValueAttr(this._lastValueReported, true);
}else if(typeof this.item == "undefined"){
// Update 'value' (ex: KY) according to currently displayed text
this.item = null;
this.set('displayedValue', newvalue);
}else{
if(this.value != this._lastValueReported){
this._handleOnChange(this.value, true);
}
this._refreshState();
}
},
_setItemAttr: function(/*item*/ item, /*Boolean?*/ priorityChange, /*String?*/ displayedValue){
// summary:
// Set the displayed valued in the input box, and the hidden value
// that gets submitted, based on a dojo.data store item.
// description:
// Users shouldn't call this function; they should be calling
// set('item', value)
// tags:
// private
var value = '';
if(item){
if(!displayedValue){
displayedValue = this.store._oldAPI ? // remove getValue() for 2.0 (old dojo.data API)
this.store.getValue(item, this.searchAttr) : item[this.searchAttr];
}
value = this._getValueField() != this.searchAttr ? this.store.getIdentity(item) : displayedValue;
}
this.set('value', value, priorityChange, displayedValue, item);
},
_announceOption: function(/*Node*/ node){
// summary:
// a11y code that puts the highlighted option in the textbox.
// This way screen readers will know what is happening in the
// menu.
if(!node){
return;
}
// pull the text value from the item attached to the DOM node
var newValue;
if(node == this.dropDown.nextButton ||
node == this.dropDown.previousButton){
newValue = node.innerHTML;
this.item = undefined;
this.value = '';
}else{
newValue = (this.store._oldAPI ? // remove getValue() for 2.0 (old dojo.data API)
this.store.getValue(node.item, this.searchAttr) : node.item[this.searchAttr]).toString();
this.set('item', node.item, false, newValue);
}
// get the text that the user manually entered (cut off autocompleted text)
this.focusNode.value = this.focusNode.value.substring(0, this._lastInput.length);
// set up ARIA activedescendant
this.focusNode.setAttribute("aria-activedescendant", domAttr.get(node, "id"));
// autocomplete the rest of the option to announce change
this._autoCompleteText(newValue);
},
_selectOption: function(/*DomNode*/ target){
// summary:
// Menu callback function, called when an item in the menu is selected.
this.closeDropDown();
if(target){
this._announceOption(target);
}
this._setCaretPos(this.focusNode, this.focusNode.value.length);
this._handleOnChange(this.value, true);
},
_startSearchAll: function(){
this._startSearch('');
},
_startSearchFromInput: function(){
this._startSearch(this.focusNode.value.replace(/([\\\*\?])/g, "\\$1"));
},
_getQueryString: function(/*String*/ text){
return string.substitute(this.queryExpr, [text]);
},
_startSearch: function(/*String*/ key){
// summary:
// Starts a search for elements matching key (key=="" means to return all items),
// and calls _openResultList() when the search completes, to display the results.
if(!this.dropDown){
var popupId = this.id + "_popup",
dropDownConstructor = lang.isString(this.dropDownClass) ?
lang.getObject(this.dropDownClass, false) : this.dropDownClass;
this.dropDown = new dropDownConstructor({
onChange: lang.hitch(this, this._selectOption),
id: popupId,
dir: this.dir,
textDir: this.textDir
});
this.focusNode.removeAttribute("aria-activedescendant");
this.textbox.setAttribute("aria-owns",popupId); // associate popup with textbox
}
this._lastInput = key; // Store exactly what was entered by the user.
// Setup parameters to be passed to store.query().
// Create a new query to prevent accidentally querying for a hidden
// value from FilteringSelect's keyField
var query = lang.clone(this.query); // #5970
var options = {
start: 0,
count: this.pageSize,
queryOptions: { // remove for 2.0
ignoreCase: this.ignoreCase,
deep: true
}
};
lang.mixin(options, this.fetchProperties);
// Generate query
var qs = this._getQueryString(key), q;
if(this.store._oldAPI){
// remove this branch for 2.0
q = qs;
}else{
// Query on searchAttr is a regex for benefit of dojo.store.Memory,
// but with a toString() method to help dojo.store.JsonRest.
// Search string like "Co*" converted to regex like /^Co.*$/i.
q = filter.patternToRegExp(qs, this.ignoreCase);
q.toString = function(){ return qs; };
}
this._lastQuery = query[this.searchAttr] = q;
// Function to run the query, wait for the results, and then call _openResultList()
var _this = this,
startQuery = function(){
var resPromise = _this._fetchHandle = _this.store.query(query, options);
Deferred.when(resPromise, function(res){
_this._fetchHandle = null;
res.total = resPromise.total;
_this._openResultList(res, query, options);
}, function(err){
_this._fetchHandle = null;
if(!_this._cancelingQuery){ // don't treat canceled query as an error
console.error(_this.declaredClass + ' ' + err.toString());
_this.closeDropDown();
}
});
};
// #5970: set _lastQuery, *then* start the timeout
// otherwise, if the user types and the last query returns before the timeout,
// _lastQuery won't be set and their input gets rewritten
this.searchTimer = setTimeout(lang.hitch(this, function(query, _this){
this.searchTimer = null;
startQuery();
// Setup method to handle clicking next/previous buttons to page through results
this._nextSearch = this.dropDown.onPage = function(direction){
options.start += options.count * direction;
// tell callback the direction of the paging so the screen
// reader knows which menu option to shout
options.direction = direction;
startQuery();
_this.focus();
};
}, query, this), this.searchDelay);
},
_getValueField: function(){
// summary:
// Helper for postMixInProperties() to set this.value based on data inlined into the markup.
// Returns the attribute name in the item (in dijit.form._ComboBoxDataStore) to use as the value.
return this.searchAttr;
},
//////////// INITIALIZATION METHODS ///////////////////////////////////////
constructor: function(){
this.query={};
this.fetchProperties={};
},
postMixInProperties: function(){
if(!this.store){
var srcNodeRef = this.srcNodeRef;
var list = this.list;
if(list){
this.store = registry.byId(list);
}else{
// if user didn't specify store, then assume there are option tags
this.store = new DataList({}, srcNodeRef);
}
// if there is no value set and there is an option list, set
// the value to the first value to be consistent with native Select
// Firefox and Safari set value
// IE6 and Opera set selectedIndex, which is automatically set
// by the selected attribute of an option tag
// IE6 does not set value, Opera sets value = selectedIndex
if(!("value" in this.params)){
var item = (this.item = this.store.fetchSelectedItem());
if(item){
var valueField = this._getValueField();
// remove getValue() for 2.0 (old dojo.data API)
this.value = this.store._oldAPI ? this.store.getValue(item, valueField) : item[valueField];
}
}
}
this.inherited(arguments);
},
postCreate: function(){
// summary:
// Subclasses must call this method from their postCreate() methods
// tags:
// protected
// find any associated label element and add to ComboBox node.
var label=query('label[for="'+this.id+'"]');
if(label.length){
label[0].id = (this.id+"_label");
this.domNode.setAttribute("aria-labelledby", label[0].id);
}
this.inherited(arguments);
},
_getMenuLabelFromItem: function(/*Item*/ item){
var label = this.labelFunc(item, this.store),
labelType = this.labelType;
// If labelType is not "text" we don't want to screw any markup or whatever.
if(this.highlightMatch != "none" && this.labelType == "text" && this._lastInput){
label = this.doHighlight(label, this._escapeHtml(this._lastInput));
labelType = "html";
}
return {html: labelType == "html", label: label};
},
doHighlight: function(/*String*/ label, /*String*/ find){
// summary:
// Highlights the string entered by the user in the menu. By default this
// highlights the first occurrence found. Override this method
// to implement your custom highlighting.
// tags:
// protected
var
// Add (g)lobal modifier when this.highlightMatch == "all" and (i)gnorecase when this.ignoreCase == true
modifiers = (this.ignoreCase ? "i" : "") + (this.highlightMatch == "all" ? "g" : ""),
i = this.queryExpr.indexOf("${0}");
find = regexp.escapeString(find); // escape regexp special chars
return this._escapeHtml(label).replace(
// prepend ^ when this.queryExpr == "${0}*" and append $ when this.queryExpr == "*${0}"
new RegExp((i == 0 ? "^" : "") + "("+ find +")" + (i == (this.queryExpr.length - 4) ? "$" : ""), modifiers),
'<span class="dijitComboBoxHighlightMatch">$1</span>'
); // returns String, (almost) valid HTML (entities encoded)
},
_escapeHtml: function(/*String*/ str){
// TODO Should become dojo.html.entities(), when exists use instead
// summary:
// Adds escape sequences for special characters in XML: &<>"'
str = String(str).replace(/&/gm, "&amp;").replace(/</gm, "&lt;")
.replace(/>/gm, "&gt;").replace(/"/gm, "&quot;"); //balance"
return str; // string
},
reset: function(){
// Overrides the _FormWidget.reset().
// Additionally reset the .item (to clean up).
this.item = null;
this.inherited(arguments);
},
labelFunc: function(/*item*/ item, /*dojo.store.api.Store*/ store){
// summary:
// Computes the label to display based on the dojo.data store item.
// returns:
// The label that the ComboBox should display
// tags:
// private
// Use toString() because XMLStore returns an XMLItem whereas this
// method is expected to return a String (#9354).
// Remove getValue() for 2.0 (old dojo.data API)
return (store._oldAPI ? store.getValue(item, this.labelAttr || this.searchAttr) :
item[this.labelAttr || this.searchAttr]).toString(); // String
},
_setValueAttr: function(/*String*/ value, /*Boolean?*/ priorityChange, /*String?*/ displayedValue, /*item?*/ item){
// summary:
// Hook so set('value', value) works.
// description:
// Sets the value of the select.
this._set("item", item||null); // value not looked up in store
if(!value){ value = ''; } // null translates to blank
this.inherited(arguments);
},
_setTextDirAttr: function(/*String*/ textDir){
// summary:
// Setter for textDir, needed for the dropDown's textDir update.
// description:
// Users shouldn't call this function; they should be calling
// set('textDir', value)
// tags:
// private
this.inherited(arguments);
// update the drop down also (_ComboBoxMenuMixin)
if(this.dropDown){
this.dropDown._set("textDir", textDir);
}
}
});
}); | PypiClean |
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/core/bot.py | import asyncio
import contextlib
import inspect
import logging
import os
import platform
import re
import shutil
import sys
from collections import namedtuple
from datetime import datetime
from enum import IntEnum
from importlib.machinery import ModuleSpec
from pathlib import Path
from types import MappingProxyType
from typing import (
Any,
Awaitable,
Callable,
Coroutine,
Dict,
List,
NoReturn,
Optional,
Set,
TypeVar,
Union,
)
import discord
from discord.ext import commands as dpy_commands
from discord.ext.commands import when_mentioned_or
from discord.ext.commands.bot import BotBase
from . import Config, bank, commands, drivers, errors, i18n, modlog
from .cog_manager import CogManager, CogManagerUI
from .core_commands import Core, license_info_command
from .data_manager import cog_data_path
from .dev_commands import Dev
from .events import init_events
from .global_checks import init_global_checks
from .rpc import RPCMixin
from .settings_caches import IgnoreManager, PrefixManager, WhitelistBlacklistManager
from .utils import common_filters
from .utils._internal_utils import send_to_owners_with_prefix_replaced
CUSTOM_GROUPS = "CUSTOM_GROUPS"
SHARED_API_TOKENS = "SHARED_API_TOKENS"
log = logging.getLogger("kronbot")
__all__ = ["KronBase", "Kron", "ExitCodes"]
NotMessage = namedtuple("NotMessage", "guild")
PreInvokeCoroutine = Callable[[commands.Context], Awaitable[Any]]
T_BIC = TypeVar("T_BIC", bound=PreInvokeCoroutine)
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
# barely spurious warning caused by our intentional shadowing
class KronBase(
commands.GroupMixin, dpy_commands.bot.BotBase, RPCMixin
): # pylint: disable=no-member
"""Mixin for the main bot class.
This exists because `Kron` inherits from `discord.AutoShardedClient`, which
is something other bot classes may not want to have as a parent class.
"""
def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):
self._shutdown_mode = ExitCodes.CRITICAL
self._cli_flags = cli_flags
self._config = Config.get_core_conf(force_registration=False)
self._co_owners = cli_flags.co_owner
self.rpc_enabled = cli_flags.rpc
self.rpc_port = cli_flags.rpc_port
self._last_exception = None
self._config.register_global(
token=None,
prefix=[],
packages=[],
owner=None,
whitelist=[],
blacklist=[],
locale="en-US",
regional_format=None,
embeds=True,
color=15158332,
fuzzy=False,
custom_info=None,
help__page_char_limit=1000,
help__max_pages_in_guild=2,
help__delete_delay=0,
help__use_menus=False,
help__show_hidden=False,
help__verify_checks=True,
help__verify_exists=False,
help__tagline="",
description="Kron V3",
invite_public=False,
invite_perm=0,
disabled_commands=[],
disabled_command_msg="That command is disabled.",
extra_owner_destinations=[],
owner_opt_out_list=[],
last_system_info__python_version=[3, 7],
last_system_info__machine=None,
last_system_info__system=None,
schema_version=0,
)
self._config.register_guild(
prefix=[],
whitelist=[],
blacklist=[],
admin_role=[],
mod_role=[],
embeds=None,
ignored=False,
use_bot_color=False,
fuzzy=False,
disabled_commands=[],
autoimmune_ids=[],
delete_delay=-1,
)
self._config.register_channel(embeds=None, ignored=False)
self._config.register_user(embeds=None)
self._config.init_custom(CUSTOM_GROUPS, 2)
self._config.register_custom(CUSTOM_GROUPS)
self._config.init_custom(SHARED_API_TOKENS, 2)
self._config.register_custom(SHARED_API_TOKENS)
self._prefix_cache = PrefixManager(self._config, cli_flags)
self._ignored_cache = IgnoreManager(self._config)
self._whiteblacklist_cache = WhitelistBlacklistManager(self._config)
async def prefix_manager(bot, message) -> List[str]:
prefixes = await self._prefix_cache.get_prefixes(message.guild)
if cli_flags.mentionable:
return when_mentioned_or(*prefixes)(bot, message)
return prefixes
if "command_prefix" not in kwargs:
kwargs["command_prefix"] = prefix_manager
if cli_flags.owner and "owner_id" not in kwargs:
kwargs["owner_id"] = cli_flags.owner
if "command_not_found" not in kwargs:
kwargs["command_not_found"] = "Command {} not found.\n{}"
message_cache_size = cli_flags.message_cache_size
if cli_flags.no_message_cache:
message_cache_size = None
kwargs["max_messages"] = message_cache_size
self._max_messages = message_cache_size
self._uptime = None
self._checked_time_accuracy = None
self._color = discord.Embed.Empty # This is needed or color ends up 0x000000
self._main_dir = bot_dir
self._cog_mgr = CogManager()
self._use_team_features = cli_flags.use_team_features
super().__init__(*args, help_command=None, **kwargs)
# Do not manually use the help formatter attribute here, see `send_help_for`,
# for a documented API. The internals of this object are still subject to change.
self._help_formatter = commands.help.KronHelpFormatter()
self.add_command(commands.help.kron_help)
self._permissions_hooks: List[commands.CheckPredicate] = []
self._kron_ready = asyncio.Event()
self._kron_before_invoke_objs: Set[PreInvokeCoroutine] = set()
def get_command(self, name: str) -> Optional[commands.Command]:
com = super().get_command(name)
assert com is None or isinstance(com, commands.Command)
return com
def get_cog(self, name: str) -> Optional[commands.Cog]:
cog = super().get_cog(name)
assert cog is None or isinstance(cog, commands.Cog)
return cog
@property
def _before_invoke(self): # DEP-WARN
return self._kron_before_invoke_method
@_before_invoke.setter
def _before_invoke(self, val): # DEP-WARN
"""Prevent this from being overwritten in super().__init__"""
pass
async def _kron_before_invoke_method(self, ctx):
await self.wait_until_kron_ready()
return_exceptions = isinstance(ctx.command, commands.commands._AlwaysAvailableCommand)
if self._kron_before_invoke_objs:
await asyncio.gather(
*(coro(ctx) for coro in self._kron_before_invoke_objs),
return_exceptions=return_exceptions,
)
def remove_before_invoke_hook(self, coro: PreInvokeCoroutine) -> None:
"""
Functional method to remove a `before_invoke` hook.
"""
self._kron_before_invoke_objs.discard(coro)
def before_invoke(self, coro: T_BIC) -> T_BIC:
"""
Overridden decorator method for Kron's ``before_invoke`` behavior.
This can safely be used purely functionally as well.
3rd party cogs should remove any hooks which they register at unload
using `remove_before_invoke_hook`
Below behavior shared with discord.py:
.. note::
The ``before_invoke`` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
----------
coro: Callable[[commands.Context], Awaitable[Any]]
The coroutine to register as the pre-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._kron_before_invoke_objs.add(coro)
return coro
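# Illustrative usage sketch (not part of this module): a 3rd-party cog can
# register and later remove a pre-invoke hook; the names below are hypothetical.
#
#     async def my_hook(ctx):
#         log.debug("about to invoke %s", ctx.command.qualified_name)
#
#     bot.before_invoke(my_hook)
#     ...
#     bot.remove_before_invoke_hook(my_hook)   # e.g. from cog_unload()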
@property
def cog_mgr(self) -> NoReturn:
raise AttributeError("Please don't mess with the cog manager internals.")
@property
def uptime(self) -> datetime:
""" Allow access to the value, but we don't want cog creators setting it """
return self._uptime
@uptime.setter
def uptime(self, value) -> NoReturn:
raise RuntimeError(
"Hey, we're cool with sharing info about the uptime, but don't try and assign to it please."
)
@property
def db(self) -> NoReturn:
raise AttributeError(
"We really don't want you touching the bot config directly. "
"If you need something in here, take a look at the exposed methods "
"and use the one which corresponds to your needs or "
"open an issue if you need an additional method for your use case."
)
@property
def counter(self) -> NoReturn:
raise AttributeError(
"Please make your own counter object by importing ``Counter`` from ``collections``."
)
@property
def color(self) -> NoReturn:
raise AttributeError("Please fetch the embed color with `get_embed_color`")
@property
def colour(self) -> NoReturn:
raise AttributeError("Please fetch the embed colour with `get_embed_colour`")
@property
def max_messages(self) -> Optional[int]:
return self._max_messages
async def allowed_by_whitelist_blacklist(
self,
who: Optional[Union[discord.Member, discord.User]] = None,
*,
who_id: Optional[int] = None,
guild_id: Optional[int] = None,
role_ids: Optional[List[int]] = None,
) -> bool:
"""
This checks if a user or member is allowed to run things,
as considered by Kron's whitelist and blacklist.
If given a user object, this function will check the global lists
If given a member, this will additionally check guild lists
If omitting a user or member, you must provide a value for ``who_id``
You may also provide a value for ``guild_id`` in this case
If providing a member by guild and member ids,
you should supply ``role_ids`` as well
Parameters
----------
who : Optional[Union[discord.Member, discord.User]]
The user or member object to check
Other Parameters
----------------
who_id : Optional[int]
The id of the user or member to check
If not providing a value for ``who``, this is a required parameter.
guild_id : Optional[int]
When used in conjunction with a provided value for ``who_id``, checks
the lists for the corresponding guild as well.
role_ids : Optional[List[int]]
When used with both ``who_id`` and ``guild_id``, checks the role ids provided.
This is required for accurate checking of members in a guild if providing ids.
Raises
------
TypeError
Did not provide ``who`` or ``who_id``
Returns
-------
bool
`True` if user is allowed to run things, `False` otherwise
"""
# Contributor Note:
# All config calls are delayed until needed in this section
# All changes should be made keeping in mind that this is also used as a global check
guild = None
mocked = False # used for an accurate delayed role id expansion later.
if not who:
if not who_id:
raise TypeError("Must provide a value for either `who` or `who_id`")
mocked = True
who = discord.Object(id=who_id)
if guild_id:
guild = discord.Object(id=guild_id)
else:
guild = getattr(who, "guild", None)
if await self.is_owner(who):
return True
global_whitelist = await self._whiteblacklist_cache.get_whitelist()
if global_whitelist:
if who.id not in global_whitelist:
return False
else:
# blacklist is only used when whitelist doesn't exist.
global_blacklist = await self._whiteblacklist_cache.get_blacklist()
if who.id in global_blacklist:
return False
if guild:
if guild.owner_id == who.id:
return True
# The delayed expansion of ids to check saves time in the DM case.
# Converting to a set reduces the total lookup time in section
if mocked:
ids = {i for i in (who.id, *(role_ids or [])) if i != guild.id}
else:
# DEP-WARN
# This uses member._roles (getattr is for the user case)
# If this is removed upstream (undocumented)
# there is a silent failure potential, and role blacklist/whitelists will break.
ids = {i for i in (who.id, *(getattr(who, "_roles", []))) if i != guild.id}
guild_whitelist = await self._whiteblacklist_cache.get_whitelist(guild)
if guild_whitelist:
if ids.isdisjoint(guild_whitelist):
return False
else:
guild_blacklist = await self._whiteblacklist_cache.get_blacklist(guild)
if not ids.isdisjoint(guild_blacklist):
return False
return True
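# Illustrative usage sketch (not part of this module): callers can pass either
# an object or raw ids; the variable names below are hypothetical.
#
#     allowed = await bot.allowed_by_whitelist_blacklist(message.author)
#
#     allowed = await bot.allowed_by_whitelist_blacklist(
#         who_id=user_id, guild_id=guild_id, role_ids=[r.id for r in roles]
#     )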
async def ignored_channel_or_guild(self, ctx: commands.Context) -> bool:
"""
This checks if the bot is meant to be ignoring commands in a channel or guild,
as considered by Kron's whitelist and blacklist.
Parameters
----------
ctx : Context of where the command is being run.
Returns
-------
bool
`True` if commands are allowed in the channel, `False` otherwise
"""
perms = ctx.channel.permissions_for(ctx.author)
surpass_ignore = (
isinstance(ctx.channel, discord.abc.PrivateChannel)
or perms.manage_guild
or await ctx.bot.is_owner(ctx.author)
or await ctx.bot.is_admin(ctx.author)
)
if surpass_ignore:
return True
guild_ignored = await self._ignored_cache.get_ignored_guild(ctx.guild)
chann_ignored = await self._ignored_cache.get_ignored_channel(ctx.channel)
return not (guild_ignored or chann_ignored and not perms.manage_channels)
async def get_valid_prefixes(self, guild: Optional[discord.Guild] = None) -> List[str]:
"""
This gets the valid prefixes for a guild.
If not provided a guild (or passed None) it will give the DM prefixes.
This is just a fancy wrapper around ``get_prefix``
Parameters
----------
guild : Optional[discord.Guild]
The guild you want prefixes for. Omit (or pass None) for the DM prefixes
Returns
-------
List[str]
If a guild was specified, the valid prefixes in that guild.
If a guild was not specified, the valid prefixes for DMs
"""
return await self.get_prefix(NotMessage(guild))
async def get_embed_color(self, location: discord.abc.Messageable) -> discord.Color:
"""
Get the embed color for a location. This takes into account all related settings.
Parameters
----------
location : `discord.abc.Messageable`
Location to check embed color for.
Returns
-------
discord.Color
Embed color for the provided location.
"""
guild = getattr(location, "guild", None)
if (
guild
and await self._config.guild(guild).use_bot_color()
and not isinstance(location, discord.Member)
):
return guild.me.color
return self._color
get_embed_colour = get_embed_color
# start config migrations
async def _maybe_update_config(self):
"""
This should be run prior to loading cogs or connecting to discord.
"""
schema_version = await self._config.schema_version()
if schema_version == 0:
await self._schema_0_to_1()
schema_version += 1
await self._config.schema_version.set(schema_version)
if schema_version == 1:
await self._schema_1_to_2()
schema_version += 1
await self._config.schema_version.set(schema_version)
async def _schema_1_to_2(self):
"""
This contains the migration of shared API tokens to a custom config scope
"""
log.info("Moving shared API tokens to a custom group")
all_shared_api_tokens = await self._config.get_raw("api_tokens", default={})
for service_name, token_mapping in all_shared_api_tokens.items():
service_partial = self._config.custom(SHARED_API_TOKENS, service_name)
async with service_partial.all() as basically_bulk_update:
basically_bulk_update.update(token_mapping)
await self._config.clear_raw("api_tokens")
async def _schema_0_to_1(self):
"""
This contains the migration to allow multiple mod and multiple admin roles.
"""
log.info("Begin updating guild configs to support multiple mod/admin roles")
all_guild_data = await self._config.all_guilds()
for guild_id, guild_data in all_guild_data.items():
guild_obj = discord.Object(id=guild_id)
mod_roles, admin_roles = [], []
maybe_mod_role_id = guild_data["mod_role"]
maybe_admin_role_id = guild_data["admin_role"]
if maybe_mod_role_id:
mod_roles.append(maybe_mod_role_id)
await self._config.guild(guild_obj).mod_role.set(mod_roles)
if maybe_admin_role_id:
admin_roles.append(maybe_admin_role_id)
await self._config.guild(guild_obj).admin_role.set(admin_roles)
log.info("Done updating guild configs to support multiple mod/admin roles")
# end Config migrations
async def pre_flight(self, cli_flags):
"""
This should only be run once, prior to connecting to discord.
"""
await self._maybe_update_config()
self.description = await self._config.description()
init_global_checks(self)
init_events(self, cli_flags)
if self.owner_id is None:
self.owner_id = await self._config.owner()
i18n_locale = await self._config.locale()
i18n.set_locale(i18n_locale)
i18n_regional_format = await self._config.regional_format()
i18n.set_regional_format(i18n_regional_format)
self.add_cog(Core(self))
self.add_cog(CogManagerUI())
self.add_command(license_info_command)
if cli_flags.dev:
self.add_cog(Dev())
await modlog._init(self)
bank._init()
packages = []
last_system_info = await self._config.last_system_info()
ver_info = list(sys.version_info[:2])
python_version_changed = False
LIB_PATH = cog_data_path(raw_name="Downloader") / "lib"
if ver_info != last_system_info["python_version"]:
await self._config.last_system_info.python_version.set(ver_info)
if any(LIB_PATH.iterdir()):
shutil.rmtree(str(LIB_PATH))
LIB_PATH.mkdir()
self.loop.create_task(
send_to_owners_with_prefix_replaced(
self,
"We detected a change in minor Python version"
" and cleared packages in lib folder.\n"
"The instance was started with no cogs, please load Downloader"
" and use `[p]cog reinstallreqs` to regenerate lib folder."
" After that, restart the bot to get"
" all of your previously loaded cogs loaded again.",
)
)
python_version_changed = True
else:
if cli_flags.no_cogs is False:
packages.extend(await self._config.packages())
if cli_flags.load_cogs:
packages.extend(cli_flags.load_cogs)
system_changed = False
machine = platform.machine()
system = platform.system()
if last_system_info["machine"] is None:
await self._config.last_system_info.machine.set(machine)
elif last_system_info["machine"] != machine:
await self._config.last_system_info.machine.set(machine)
system_changed = True
if last_system_info["system"] is None:
await self._config.last_system_info.system.set(system)
elif last_system_info["system"] != system:
await self._config.last_system_info.system.set(system)
system_changed = True
if system_changed and not python_version_changed:
self.loop.create_task(
send_to_owners_with_prefix_replaced(
self,
"We detected a possible change in machine's operating system"
" or architecture. You might need to regenerate your lib folder"
" if 3rd-party cogs stop working properly.\n"
"To regenerate lib folder, load Downloader and use `[p]cog reinstallreqs`.",
)
)
if packages:
# Load permissions first, for security reasons
try:
packages.remove("permissions")
except ValueError:
pass
else:
packages.insert(0, "permissions")
to_remove = []
print("Loading packages...")
for package in packages:
try:
spec = await self._cog_mgr.find_cog(package)
await asyncio.wait_for(self.load_extension(spec), 30)
except asyncio.TimeoutError:
log.exception("Failed to load package %s (timeout)", package)
to_remove.append(package)
except Exception as e:
log.exception("Failed to load package {}".format(package), exc_info=e)
await self.remove_loaded_package(package)
to_remove.append(package)
for package in to_remove:
packages.remove(package)
if packages:
print("Loaded packages: " + ", ".join(packages))
if self.rpc_enabled:
await self.rpc.initialize(self.rpc_port)
async def start(self, *args, **kwargs):
cli_flags = kwargs.pop("cli_flags")
await self.pre_flight(cli_flags=cli_flags)
return await super().start(*args, **kwargs)
async def send_help_for(
self, ctx: commands.Context, help_for: Union[commands.Command, commands.GroupMixin, str]
):
"""
Invokes Kron's helpformatter for a given context and object.
"""
return await self._help_formatter.send_help(ctx, help_for)
async def embed_requested(self, channel, user, command=None) -> bool:
"""
Determine if an embed is requested for a response.
Parameters
----------
channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`
The channel to check embed settings for.
user : `discord.abc.User`
The user to check embed settings for.
command
(Optional) the command ran.
Returns
-------
bool
:code:`True` if an embed is requested
"""
if isinstance(channel, discord.abc.PrivateChannel):
user_setting = await self._config.user(user).embeds()
if user_setting is not None:
return user_setting
else:
channel_setting = await self._config.channel(channel).embeds()
if channel_setting is not None:
return channel_setting
guild_setting = await self._config.guild(channel.guild).embeds()
if guild_setting is not None:
return guild_setting
global_setting = await self._config.embeds()
return global_setting
async def is_owner(self, user: Union[discord.User, discord.Member]) -> bool:
"""
Determines if the user should be considered a bot owner.
This takes into account CLI flags and application ownership.
By default,
application team members are not considered owners,
while individual application owners are.
Parameters
----------
user: Union[discord.User, discord.Member]
Returns
-------
bool
"""
if user.id in self._co_owners:
return True
if self.owner_id:
return self.owner_id == user.id
elif self.owner_ids:
return user.id in self.owner_ids
else:
app = await self.application_info()
if app.team:
if self._use_team_features:
self.owner_ids = ids = {m.id for m in app.team.members}
return user.id in ids
else:
self.owner_id = owner_id = app.owner.id
return user.id == owner_id
return False
async def is_admin(self, member: discord.Member) -> bool:
"""Checks if a member is an admin of their guild."""
try:
member_snowflakes = member._roles # DEP-WARN
for snowflake in await self._config.guild(member.guild).admin_role():
if member_snowflakes.has(snowflake): # Dep-WARN
return True
except AttributeError: # someone passed a webhook to this
pass
return False
async def is_mod(self, member: discord.Member) -> bool:
"""Checks if a member is a mod or admin of their guild."""
try:
member_snowflakes = member._roles # DEP-WARN
for snowflake in await self._config.guild(member.guild).admin_role():
if member_snowflakes.has(snowflake): # DEP-WARN
return True
for snowflake in await self._config.guild(member.guild).mod_role():
if member_snowflakes.has(snowflake): # DEP-WARN
return True
except AttributeError: # someone passed a webhook to this
pass
return False
async def get_admin_roles(self, guild: discord.Guild) -> List[discord.Role]:
"""
Gets the admin roles for a guild.
"""
ret: List[discord.Role] = []
for snowflake in await self._config.guild(guild).admin_role():
r = guild.get_role(snowflake)
if r:
ret.append(r)
return ret
async def get_mod_roles(self, guild: discord.Guild) -> List[discord.Role]:
"""
Gets the mod roles for a guild.
"""
ret: List[discord.Role] = []
for snowflake in await self._config.guild(guild).mod_role():
r = guild.get_role(snowflake)
if r:
ret.append(r)
return ret
async def get_admin_role_ids(self, guild_id: int) -> List[int]:
"""
Gets the admin role ids for a guild id.
"""
return await self._config.guild(discord.Object(id=guild_id)).admin_role()
async def get_mod_role_ids(self, guild_id: int) -> List[int]:
"""
Gets the mod role ids for a guild id.
"""
return await self._config.guild(discord.Object(id=guild_id)).mod_role()
async def get_shared_api_tokens(self, service_name: str) -> Dict[str, str]:
"""
Gets the shared API tokens for a service
Parameters
----------
service_name: str
The service to get tokens for.
Returns
-------
Dict[str, str]
A Mapping of token names to tokens.
This mapping exists because some services have multiple tokens.
"""
return await self._config.custom(SHARED_API_TOKENS, service_name).all()
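# Illustrative usage sketch (not part of this module), mirroring the setter
# examples below; "youtube"/"api_key" follow the docstrings of the other
# shared-API methods in this class.
#
#     tokens = await ctx.bot.get_shared_api_tokens("youtube")
#     api_key = tokens.get("api_key")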
async def set_shared_api_tokens(self, service_name: str, **tokens: str):
"""
Sets shared API tokens for a service
In most cases, this should not be used. Users should instead be using the
``set api`` command
This will not clear existing values not specified.
Parameters
----------
service_name: str
The service to set tokens for
**tokens
token_name -> token
Examples
--------
Setting the api_key for youtube from a value in a variable ``my_key``
>>> await ctx.bot.set_shared_api_tokens("youtube", api_key=my_key)
"""
async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:
group.update(tokens)
self.dispatch("red_api_tokens_update", service_name, MappingProxyType(group))
async def remove_shared_api_tokens(self, service_name: str, *token_names: str):
"""
Removes shared API tokens
Parameters
----------
service_name: str
The service to remove tokens for
*token_names: str
The name of each token to be removed
Examples
--------
Removing the api_key for youtube
>>> await ctx.bot.remove_shared_api_tokens("youtube", "api_key")
"""
async with self._config.custom(SHARED_API_TOKENS, service_name).all() as group:
for name in token_names:
group.pop(name, None)
async def get_context(self, message, *, cls=commands.Context):
return await super().get_context(message, cls=cls)
async def process_commands(self, message: discord.Message):
"""
Same as base method, but dispatches an additional event for cogs
which want to handle normal messages differently to command
messages, without the overhead of additional get_context calls
per cog.
"""
if not message.author.bot:
ctx = await self.get_context(message)
await self.invoke(ctx)
else:
ctx = None
if ctx is None or ctx.valid is False:
self.dispatch("message_without_command", message)
@staticmethod
def list_packages():
"""Lists packages present in the cogs the folder"""
return os.listdir("cogs")
async def save_packages_status(self, packages):
await self._config.packages.set(packages)
async def add_loaded_package(self, pkg_name: str):
async with self._config.packages() as curr_pkgs:
if pkg_name not in curr_pkgs:
curr_pkgs.append(pkg_name)
async def remove_loaded_package(self, pkg_name: str):
async with self._config.packages() as curr_pkgs:
while pkg_name in curr_pkgs:
curr_pkgs.remove(pkg_name)
async def load_extension(self, spec: ModuleSpec):
# NB: this completely bypasses `discord.ext.commands.Bot._load_from_module_spec`
name = spec.name.split(".")[-1]
if name in self.extensions:
raise errors.PackageAlreadyLoaded(spec)
lib = spec.loader.load_module()
if not hasattr(lib, "setup"):
del lib
raise discord.ClientException(f"extension {name} does not have a setup function")
try:
if asyncio.iscoroutinefunction(lib.setup):
await lib.setup(self)
else:
lib.setup(self)
except Exception as e:
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
raise
else:
self._BotBase__extensions[name] = lib
def remove_cog(self, cogname: str):
cog = self.get_cog(cogname)
if cog is None:
return
for cls in inspect.getmro(cog.__class__):
try:
hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
except AttributeError:
pass
else:
self.remove_permissions_hook(hook)
super().remove_cog(cogname)
cog.requires.reset()
for meth in self.rpc_handlers.pop(cogname.upper(), ()):
self.unregister_rpc_handler(meth)
async def is_automod_immune(
self, to_check: Union[discord.Message, commands.Context, discord.abc.User, discord.Role]
) -> bool:
"""
Checks if the user, message, context, or role should be considered immune from automated
moderation actions.
This will return ``False`` in direct messages.
Parameters
----------
to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`
Something to check if it would be immune
Returns
-------
bool
``True`` if immune
"""
guild = getattr(to_check, "guild", None)
if not guild:
return False
if isinstance(to_check, discord.Role):
ids_to_check = [to_check.id]
else:
author = getattr(to_check, "author", to_check)
try:
ids_to_check = [r.id for r in author.roles]
except AttributeError:
# webhook messages are a user not member,
# cheaper than isinstance
if author.bot and author.discriminator == "0000":
return True # webhooks require significant permissions to enable.
else:
ids_to_check.append(author.id)
immune_ids = await self._config.guild(guild).autoimmune_ids()
return any(i in immune_ids for i in ids_to_check)
@staticmethod
async def send_filtered(
destination: discord.abc.Messageable,
filter_mass_mentions=True,
filter_invite_links=True,
filter_all_links=False,
**kwargs,
):
"""
This is a convenience wrapper around
discord.abc.Messageable.send
It takes the destination you'd like to send to, which filters to apply
(defaults on mass mentions, and invite links) and any other parameters
normally accepted by destination.send
This should realistically only be used for responding using user provided
input. (unfortunately, including usernames)
Manually crafted messages which don't take any user input have no need of this
Returns
-------
discord.Message
The message that was sent.
"""
content = kwargs.pop("content", None)
if content:
if filter_mass_mentions:
content = common_filters.filter_mass_mentions(content)
if filter_invite_links:
content = common_filters.filter_invites(content)
if filter_all_links:
content = common_filters.filter_urls(content)
return await destination.send(content=content, **kwargs)
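# Illustrative usage sketch (not part of this module): echoing user-provided
# text back while stripping mass mentions and invite links (the variable name
# is hypothetical).
#
#     await bot.send_filtered(ctx.channel, content=user_supplied_text)
#     await bot.send_filtered(ctx.channel, filter_all_links=True,
#                             content=user_supplied_text)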
def add_cog(self, cog: commands.Cog):
if not isinstance(cog, commands.Cog):
raise RuntimeError(
f"The {cog.__class__.__name__} cog in the {cog.__module__} package does "
f"not inherit from the commands.Cog base class. The cog author must update "
f"the cog to adhere to this requirement."
)
if cog.__cog_name__ in self.cogs:
raise RuntimeError(f"There is already a cog named {cog.__cog_name__} loaded.")
if not hasattr(cog, "requires"):
commands.Cog.__init__(cog)
added_hooks = []
try:
for cls in inspect.getmro(cog.__class__):
try:
hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
except AttributeError:
pass
else:
self.add_permissions_hook(hook)
added_hooks.append(hook)
super().add_cog(cog)
self.dispatch("cog_add", cog)
if "permissions" not in self.extensions:
cog.requires.ready_event.set()
except Exception:
for hook in added_hooks:
try:
self.remove_permissions_hook(hook)
except Exception:
# This shouldn't be possible
log.exception(
"A hook got extremely screwed up, "
"and could not be removed properly during another error in cog load."
)
del cog
raise
def add_command(self, command: commands.Command) -> None:
if not isinstance(command, commands.Command):
raise RuntimeError("Commands must be instances of `kronbot.core.commands.Command`")
super().add_command(command)
permissions_not_loaded = "permissions" not in self.extensions
self.dispatch("command_add", command)
if permissions_not_loaded:
command.requires.ready_event.set()
if isinstance(command, commands.Group):
for subcommand in set(command.walk_commands()):
self.dispatch("command_add", subcommand)
if permissions_not_loaded:
subcommand.requires.ready_event.set()
def remove_command(self, name: str) -> None:
command = super().remove_command(name)
if not command:
return
command.requires.reset()
if isinstance(command, commands.Group):
for subcommand in set(command.walk_commands()):
subcommand.requires.reset()
def clear_permission_rules(self, guild_id: Optional[int], **kwargs) -> None:
"""Clear all permission overrides in a scope.
Parameters
----------
guild_id : Optional[int]
The guild ID to wipe permission overrides for. If
``None``, this will clear all global rules and leave all
guild rules untouched.
**kwargs
Keyword arguments to be passed to each required call of
``commands.Requires.clear_all_rules``
"""
for cog in self.cogs.values():
cog.requires.clear_all_rules(guild_id, **kwargs)
for command in self.walk_commands():
command.requires.clear_all_rules(guild_id, **kwargs)
def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:
"""Add a permissions hook.
Permissions hooks are check predicates which are called before
calling `Requires.verify`, and they can optionally return an
override: ``True`` to allow, ``False`` to deny, and ``None`` to
default to normal behaviour.
Parameters
----------
hook
A command check predicate which returns ``True``, ``False``
or ``None``.
"""
self._permissions_hooks.append(hook)
def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:
"""Remove a permissions hook.
Parameters are the same as those in `add_permissions_hook`.
Raises
------
ValueError
If the permissions hook has not been added.
"""
self._permissions_hooks.remove(hook)
async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:
"""Run permissions hooks.
Parameters
----------
ctx : commands.Context
The context for the command being invoked.
Returns
-------
Optional[bool]
``False`` if any hooks returned ``False``, ``True`` if any
hooks return ``True`` and none returned ``False``, ``None``
otherwise.
"""
hook_results = []
for hook in self._permissions_hooks:
result = await discord.utils.maybe_coroutine(hook, ctx)
if result is not None:
hook_results.append(result)
if hook_results:
if all(hook_results):
ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK
return True
else:
ctx.permission_state = commands.PermState.DENIED_BY_HOOK
return False
async def get_owner_notification_destinations(self) -> List[discord.abc.Messageable]:
"""
Gets the users and channels to send to
"""
await self.wait_until_kron_ready()
destinations = []
opt_outs = await self._config.owner_opt_out_list()
team_ids = () if not self._use_team_features else self.owner_ids
for user_id in set((self.owner_id, *self._co_owners, *team_ids)):
if user_id not in opt_outs:
user = self.get_user(user_id)
if user and not user.bot: # user.bot is possible with flags and teams
destinations.append(user)
else:
log.warning(
"Owner with ID %s is missing in user cache,"
" ignoring owner notification destination.",
user_id,
)
channel_ids = await self._config.extra_owner_destinations()
for channel_id in channel_ids:
channel = self.get_channel(channel_id)
if channel:
destinations.append(channel)
else:
log.warning(
"Channel with ID %s is not available,"
" ignoring owner notification destination.",
channel_id,
)
return destinations
async def send_to_owners(self, content=None, **kwargs):
"""
        This sends something to all owners and their configured extra destinations.
        It takes the same arguments as discord.abc.Messageable.send.
        Failing sends are logged.
"""
destinations = await self.get_owner_notification_destinations()
async def wrapped_send(location, content=None, **kwargs):
try:
await location.send(content, **kwargs)
except Exception as _exc:
log.error(
"I could not send an owner notification to %s (%s)",
location,
location.id,
exc_info=_exc,
)
sends = [wrapped_send(d, content, **kwargs) for d in destinations]
await asyncio.gather(*sends)
async def wait_until_kron_ready(self):
"""Wait until our post connection startup is done."""
await self._kron_ready.wait()
async def _delete_delay(self, ctx: commands.Context):
"""Currently used for:
* delete delay"""
guild = ctx.guild
if guild is None:
return
message = ctx.message
delay = await self._config.guild(guild).delete_delay()
if delay == -1:
return
async def _delete_helper(m):
with contextlib.suppress(discord.HTTPException):
await m.delete()
log.debug("Deleted command msg {}".format(m.id))
await asyncio.sleep(delay)
await _delete_helper(message)
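
# --- Illustrative sketch (not part of the original file) ---
# A minimal permissions hook of the shape described in `add_permissions_hook`
# above: return True to allow, False to deny, None to fall back to the normal
# permission checks. The "allow the guild owner" policy here is just an example.
async def _example_permissions_hook(ctx):
    if ctx.guild is not None and ctx.author.id == ctx.guild.owner_id:
        return True
    return None
# A bot instance would register it with:
#     bot.add_permissions_hook(_example_permissions_hook)
# and remove it later with:
#     bot.remove_permissions_hook(_example_permissions_hook)
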
class Kron(KronBase, discord.AutoShardedClient):
"""
You're welcome Caleb.
"""
async def logout(self):
"""Logs out of Discord and closes all connections."""
await super().logout()
await drivers.get_driver_class().teardown()
try:
await self.rpc.close()
except AttributeError:
pass
async def shutdown(self, *, restart: bool = False):
"""Gracefully quit Kron.
The program will exit with code :code:`0` by default.
Parameters
----------
restart : bool
If :code:`True`, the program will exit with code :code:`26`. If the
launcher sees this, it will attempt to restart the bot.
"""
if not restart:
self._shutdown_mode = ExitCodes.SHUTDOWN
else:
self._shutdown_mode = ExitCodes.RESTART
await self.logout()
sys.exit(self._shutdown_mode)
class ExitCodes(IntEnum):
# This needs to be an int enum to be used
# with sys.exit
CRITICAL = 1
SHUTDOWN = 0
    RESTART = 26
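
# --- Illustrative sketch (not part of the original file) ---
# How a cog might combine `is_automod_immune` and `send_filtered` from above when
# echoing user-provided text. `bot` is assumed to be the bot instance defined in
# this module and `message` a discord.Message supplied by an event handler.
async def _example_echo_filtered(bot, message):
    if await bot.is_automod_immune(message):
        return  # immune authors are skipped by automated handling
    # strips mass mentions and invite links before re-sending user input
    await bot.send_filtered(message.channel, content=message.content)
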
# /AutoRequirements-0.1.0.tar.gz/AutoRequirements-0.1.0/src/__init__.py
from os import name as _name, system as _system, get_terminal_size as _terminal_size, terminal_size
from sys import stdout as _stdout
from time import sleep as _sleep
from threading import Thread as _thread
if _name == 'nt':
from ctypes import c_int, c_byte, Structure, byref, windll
class _CursorInfo(Structure):
_fields_ = [("size", c_int),
("visible", c_byte)]
class System:
"""
1 variable:
Windows | tells if the user is on Windows OS or not
5 functions:
Init() | initialize the terminal to allow the use of colors
Clear() | clear the terminal
        Title()   | set the terminal title (Windows only)
        Size()    | set the terminal size (Windows only)
        Command() | run a shell command
"""
Windows = _name == 'nt'
def Init():
_system('')
def Clear():
return _system("cls" if System.Windows else "clear")
def Title(title: str):
if System.Windows:
return _system(f"title {title}")
def Size(x: int, y: int):
if System.Windows:
return _system(f"mode {x}, {y}")
def Command(command: str):
return _system(command)
class Cursor:
"""
2 functions:
        HideCursor() | hides the blinking cursor in the terminal
        ShowCursor() | shows the blinking cursor in the terminal
"""
def HideCursor():
if _name == 'nt':
Cursor._cursor(False)
elif _name == 'posix':
_stdout.write("\033[?25l")
_stdout.flush()
def ShowCursor():
if _name == 'nt':
Cursor._cursor(True)
elif _name == 'posix':
_stdout.write("\033[?25h")
_stdout.flush()
""" ! developper area ! """
def _cursor(visible: bool):
ci = _CursorInfo()
handle = windll.kernel32.GetStdHandle(-11)
windll.kernel32.GetConsoleCursorInfo(handle, byref(ci))
ci.visible = visible
windll.kernel32.SetConsoleCursorInfo(handle, byref(ci))
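
# --- Illustrative sketch (not part of the original package) ---
# Hiding the cursor while drawing and restoring it afterwards, using only the
# Cursor helpers defined above.
def _example_cursor_usage():
    Cursor.HideCursor()
    try:
        print("rendering without a visible cursor")
    finally:
        Cursor.ShowCursor()
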
class _MakeColors:
""" ! developper area ! """
def _makeansi(col: str, text: str) -> str:
return f"\033[38;2;{col}m{text}\033[38;2;255;255;255m"
def _rmansi(col: str) -> str:
return col.replace('\033[38;2;', '').replace('m','').replace('50m', '').replace('\x1b[38', '')
def _makergbcol(var1: list, var2: list) -> list:
col = list(var1[:12])
for _col in var2[:12]:
col.append(_col)
for _col in reversed(col):
col.append(_col)
return col
def _start(color: str) -> str:
return f"\033[38;2;{color}m"
def _end() -> str:
return "\033[38;2;255;255;255m"
def _maketext(color: str, text: str, end: bool = False) -> str:
end = _MakeColors._end() if end else ""
return color+text+end
def _getspaces(text: str) -> int:
return len(text) - len(text.lstrip())
def _makerainbow(*colors) -> list:
colors = [color[:24] for color in colors]
rainbow = []
for color in colors:
for col in color:
rainbow.append(col)
return rainbow
def _reverse(colors: list) -> list:
_colors = list(colors)
for col in reversed(_colors):
colors.append(col)
return colors
def _mixcolors(col1: str, col2: str, _reverse: bool = True) -> list:
col1, col2 = _MakeColors._rmansi(col=col1), _MakeColors._rmansi(col=col2)
fade1 = Colors.StaticMIX([col1, col2], _start=False)
fade2 = Colors.StaticMIX([fade1, col2], _start=False)
fade3 = Colors.StaticMIX([fade1, col1], _start=False)
fade4 = Colors.StaticMIX([fade2, col2], _start=False)
fade5 = Colors.StaticMIX([fade1, fade3], _start=False)
fade6 = Colors.StaticMIX([fade3, col1], _start=False)
fade7 = Colors.StaticMIX([fade1, fade2], _start=False)
mixed = [col1, fade6, fade3, fade5, fade1, fade7, fade2, fade4, col2]
return _MakeColors._reverse(colors=mixed) if _reverse else mixed
class Colors:
"""
54 variables (colors)
3 lists:
static_colors | colors that are static, ex: 'red' (can't be faded)
dynamic_colors | colors that are dynamic, ex: 'blue_to_purple' (can be faded)
all_colors | every color of static_colors and dynamic_colors
    5 functions:
        StaticRGB()  | create your own fixed/static color
DynamicRGB() | create your own faded/dynamic color (soon...)
StaticMIX() | mix two or more static colors
DynamicMIX() | mix two or more dynamic colors
Symbol() | create a colored symbol, ex: '[!]'
"""
def StaticRGB(r: int, g: int, b: int) -> str:
return _MakeColors._start(f"{r};{g};{b}")
def DynamicRGB(r1: int, g1: int, b1: int, r2: int,
g2: int, b2: int) -> list: ...
def StaticMIX(colors: list, _start: bool = True) -> str:
rgb = []
for col in colors:
col = _MakeColors._rmansi(col=col)
col = col.split(';')
r = int(int(col[0]))
g = int(int(col[1]))
b = int(int(col[2]))
rgb.append([r, g, b])
r = round(sum(rgb[0] for rgb in rgb) / len(rgb))
g = round(sum(rgb[1] for rgb in rgb) / len(rgb))
b = round(sum(rgb[2] for rgb in rgb) / len(rgb))
rgb = f'{r};{g};{b}'
return _MakeColors._start(rgb) if _start else rgb
def DynamicMIX(colors: list):
_colors = []
for color in colors:
if colors.index(color) == len(colors) - 1:
break
_colors.append([color, colors[colors.index(color) + 1]])
colors = [_MakeColors._mixcolors(col1=color[0], col2=color[1], _reverse=False) for color in _colors]
final = []
for col in colors:
for col in col:
final.append(col)
return _MakeColors._reverse(colors=final)
""" symbols """
def Symbol(symbol: str, col: str, col_left_right: str, left: str = '[', right: str = ']') -> str:
return f"{col_left_right}{left}{col}{symbol}{col_left_right}{right}{Col.reset}"
""" dynamic colors """
black_to_white = ["m;m;m"]
black_to_red = ["m;0;0"]
black_to_green = ["0;m;0"]
black_to_blue = ["0;0;m"]
white_to_black = ["n;n;n"]
white_to_red = ["255;n;n"]
white_to_green = ["n;255;n"]
white_to_blue = ["n;n;255"]
red_to_black = ["n;0;0"]
red_to_white = ["255;m;m"]
red_to_yellow = ["255;m;0"]
red_to_purple = ["255;0;m"]
green_to_black = ["0;n;0"]
green_to_white = ["m;255;m"]
green_to_yellow = ["m;255;0"]
green_to_cyan = ["0;255;m"]
blue_to_black = ["0;0;n"]
blue_to_white = ["m;m;255"]
blue_to_cyan = ["0;m;255"]
blue_to_purple = ["m;0;255"]
yellow_to_red = ["255;n;0"]
yellow_to_green = ["n;255;0"]
purple_to_red = ["255;0;n"]
purple_to_blue = ["n;0;255"]
cyan_to_green = ["0;255;n"]
cyan_to_blue = ["0;n;255"]
red_to_blue = ...
red_to_green = ...
green_to_blue = ...
green_to_red = ...
blue_to_red = ...
blue_to_green = ...
rainbow = ...
""" static colors """
red = _MakeColors._start('255;0;0')
green = _MakeColors._start('0;255;0')
blue = _MakeColors._start('0;0;255')
white = _MakeColors._start('255;255;255')
black = _MakeColors._start('0;0;0')
gray = _MakeColors._start('150;150;150')
yellow = _MakeColors._start('255;255;0')
purple = _MakeColors._start('255;0;255')
cyan = _MakeColors._start('0;255;255')
orange = _MakeColors._start('255;150;0')
pink = _MakeColors._start('255;0;150')
turquoise = _MakeColors._start('0;150;255')
light_gray = _MakeColors._start('200;200;200')
dark_gray = _MakeColors._start('100;100;100')
light_red = _MakeColors._start('255;100;100')
light_green = _MakeColors._start('100;255;100')
light_blue = _MakeColors._start('100;100;255')
dark_red = _MakeColors._start('100;0;0')
dark_green = _MakeColors._start('0;100;0')
dark_blue = _MakeColors._start('0;0;100')
reset = white
""" ! developper area ! """
col = (list, str)
dynamic_colors = [
black_to_white, black_to_red, black_to_green, black_to_blue,
white_to_black, white_to_red, white_to_green, white_to_blue,
red_to_black, red_to_white, red_to_yellow, red_to_purple,
green_to_black, green_to_white, green_to_yellow, green_to_cyan,
blue_to_black, blue_to_white, blue_to_cyan, blue_to_purple,
yellow_to_red, yellow_to_green,
purple_to_red, purple_to_blue,
cyan_to_green, cyan_to_blue
]
for color in dynamic_colors:
_col = 20
reversed_col = 220
dbl_col = 20
dbl_reversed_col = 220
content = color[0]
color.pop(0)
for _ in range(12):
if 'm' in content:
result = content.replace('m', str(_col))
color.append(result)
elif 'n' in content:
result = content.replace('n', str(reversed_col))
color.append(result)
_col += 20
reversed_col -= 20
for _ in range(12):
if 'm' in content:
result = content.replace('m', str(dbl_reversed_col))
color.append(result)
elif 'n' in content:
result = content.replace('n', str(dbl_col))
color.append(result)
dbl_col += 20
dbl_reversed_col -= 20
red_to_blue = _MakeColors._makergbcol(red_to_purple, purple_to_blue)
red_to_green = _MakeColors._makergbcol(red_to_yellow, yellow_to_green)
green_to_blue = _MakeColors._makergbcol(green_to_cyan, cyan_to_blue)
green_to_red = _MakeColors._makergbcol(green_to_yellow, yellow_to_red)
blue_to_red = _MakeColors._makergbcol(blue_to_purple, purple_to_red)
blue_to_green = _MakeColors._makergbcol(blue_to_cyan, cyan_to_green)
rainbow = _MakeColors._makerainbow(
red_to_green, green_to_blue, blue_to_red)
for _col in (
red_to_blue, red_to_green,
green_to_blue, green_to_red,
blue_to_red, blue_to_green
): dynamic_colors.append(_col)
dynamic_colors.append(rainbow)
static_colors = [
red, green, blue,
white, black, gray,
yellow, purple, cyan,
orange, pink, turquoise,
light_gray, dark_gray,
light_red, light_green, light_blue,
dark_red, dark_green, dark_blue,
reset
]
all_colors = [color for color in dynamic_colors]
for color in static_colors:
all_colors.append(color)
Col = Colors
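
# --- Illustrative sketch (not part of the original package) ---
# Building custom static colors and a colored "[!]"-style symbol with the helpers
# documented in the Colors docstring above.
def _example_colors_usage():
    blended = Colors.StaticMIX([Colors.red, Colors.yellow])   # average of two static colors
    custom = Colors.StaticRGB(90, 120, 200)                   # arbitrary RGB color
    warning = Colors.Symbol('!', Colors.red, Colors.white)    # white brackets, red '!'
    return blended + "blended " + custom + "custom " + warning
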
class Colorate:
"""
    7 functions:
Static colors:
Color() | color a text with a static color
Error() | make an error with red text and advanced arguments
Format() | set different colors for different parts of a text
Dynamic colors:
Vertical() | fade a text vertically
Horizontal() | fade a text horizontally
Diagonal() | fade a text diagonally
DiagonalBackwards() | fade a text diagonally but backwards
"""
""" fix/static colors """
def Color(color: str, text: str, end: bool = True) -> str:
return _MakeColors._maketext(color=color, text=text, end=end)
def Error(text: str, color: str = Colors.red, end: bool = False, spaces: bool = 1, enter: bool = True, wait: int = False) -> str:
content = _MakeColors._maketext(
color=color, text="\n" * spaces + text, end=end)
if enter:
var = input(content)
else:
print(content)
var = None
if wait is True:
exit()
elif wait is not False:
_sleep(wait)
return var
""" faded/dynamic colors"""
def Vertical(color: list, text: str, speed: int = 1, start: int = 0, stop: int = 0, cut: int = 0, fill: bool = False) -> str:
color = color[cut:]
lines = text.splitlines()
result = ""
nstart = 0
color_n = 0
for lin in lines:
colorR = color[color_n]
if fill:
result += " " * \
_MakeColors._getspaces(
lin) + "".join(_MakeColors._makeansi(colorR, x) for x in lin.strip()) + "\n"
else:
result += " " * \
_MakeColors._getspaces(
lin) + _MakeColors._makeansi(colorR, lin.strip()) + "\n"
if nstart != start:
nstart += 1
continue
if lin.rstrip():
if (
stop == 0
and color_n + speed < len(color)
or stop != 0
and color_n + speed < stop
):
color_n += speed
elif stop == 0:
color_n = 0
else:
color_n = stop
return result.rstrip()
def Horizontal(color: list, text: str, speed: int = 1, cut: int = 0) -> str:
color = color[cut:]
lines = text.splitlines()
result = ""
for lin in lines:
carac = list(lin)
color_n = 0
for car in carac:
colorR = color[color_n]
result += " " * \
_MakeColors._getspaces(
car) + _MakeColors._makeansi(colorR, car.strip())
if color_n + speed < len(color):
color_n += speed
else:
color_n = 0
result += "\n"
return result.rstrip()
def Diagonal(color: list, text: str, speed: int = 1, cut: int = 0) -> str:
color = color[cut:]
lines = text.splitlines()
result = ""
color_n = 0
for lin in lines:
carac = list(lin)
for car in carac:
colorR = color[color_n]
result += " " * \
_MakeColors._getspaces(
car) + _MakeColors._makeansi(colorR, car.strip())
if color_n + speed < len(color):
color_n += speed
else:
color_n = 1
result += "\n"
return result.rstrip()
def DiagonalBackwards(color: list, text: str, speed: int = 1, cut: int = 0) -> str:
color = color[cut:]
lines = text.splitlines()
result = ""
resultL = ''
color_n = 0
for lin in lines:
carac = list(lin)
carac.reverse()
resultL = ''
for car in carac:
colorR = color[color_n]
resultL = " " * \
_MakeColors._getspaces(
car) + _MakeColors._makeansi(colorR, car.strip()) + resultL
if color_n + speed < len(color):
color_n += speed
else:
color_n = 0
result = result + '\n' + resultL
return result.strip()
def Format(text: str, second_chars: list, mode, principal_col: Colors.col, second_col: str):
if mode == Colorate.Vertical:
ctext = mode(principal_col, text, fill=True)
else:
ctext = mode(principal_col, text)
ntext = ""
for x in ctext:
if x in second_chars:
x = Colorate.Color(second_col, x)
ntext += x
return ntext
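
# --- Illustrative sketch (not part of the original package) ---
# Fading a multi-line banner with the dynamic color lists defined in Colors,
# as described in the Colorate docstring above.
def _example_colorate_usage():
    banner = "first line\nsecond line\nthird line"
    vertical = Colorate.Vertical(Colors.blue_to_purple, banner)   # one color step per line
    horizontal = Colorate.Horizontal(Colors.rainbow, banner)      # one color step per character
    plain = Colorate.Color(Colors.red, "a static red string")
    return vertical, horizontal, plain
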
class Anime:
"""
    4 functions:
Fade() | make a small animation with a changing color text, using a dynamic color
Move() | make a small animation moving the text from left to right
Bar() | a fully customizable charging bar
Anime() | a mix between Fade() and Move(), available soon
"""
def Fade(text: str, color: list, mode, time=True, interval=0.05, hide_cursor: bool = True, enter: bool = False):
if hide_cursor:
Cursor.HideCursor()
if type(time) == int:
time *= 15
global passed
passed = False
if enter:
th = _thread(target=Anime._input)
th.start()
if time is True:
while True:
if passed is not False:
break
Anime._anime(text, color, mode, interval)
ncolor = color[1:]
ncolor.append(color[0])
color = ncolor
else:
for _ in range(time):
if passed is not False:
break
Anime._anime(text, color, mode, interval)
ncolor = color[1:]
ncolor.append(color[0])
color = ncolor
if hide_cursor:
Cursor.ShowCursor()
def Move(text: str, color: list, time = True, interval = 0.01, hide_cursor: bool = True, enter: bool = False):
if hide_cursor:
Cursor.HideCursor()
if type(time) == int:
time *= 15
global passed
passed = False
columns = _terminal_size().columns
if enter:
th = _thread(target = Anime._input)
th.start()
count = 0
mode = 1
if time is True:
while not passed:
if mode == 1:
if count >= (columns - (max(len(txt) for txt in text.splitlines()) + 1)):
mode = 2
count += 1
elif mode == 2:
if count <= 0:
mode = 1
count -= 1
Anime._anime('\n'.join((' ' * count) + line for line in text.splitlines()), color or [], lambda a, b: b, interval)
else:
for _ in range(time):
if passed:
break
if mode == 1:
if count >= (columns - (max(len(txt) for txt in text.splitlines()) + 1)):
mode = 2
elif mode == 2:
if count <= 0:
mode = 1
Anime._anime('\n'.join((' ' * count) + line for line in text.splitlines()), color or [], lambda a, b: b, interval)
count += 1
if hide_cursor:
Cursor.ShowCursor()
def Bar(length, carac_0: str = '[ ]', carac_1: str = '[0]', color: list = Colors.white, mode=Colorate.Horizontal, interval: int = 0.5, hide_cursor: bool = True, enter: bool = False, center: bool = False):
if hide_cursor:
Cursor.HideCursor()
if type(color) == list:
while not length <= len(color):
ncolor = list(color)
for col in ncolor:
color.append(col)
global passed
passed = False
if enter:
th = _thread(target=Anime._input)
th.start()
for i in range(length + 1):
bar = carac_1 * i + carac_0 * (length - i)
if passed:
break
if type(color) == list:
if center:
print(Center.XCenter(mode(color, bar)))
else:
print(mode(color, bar))
else:
if center:
print(Center.XCenter(color + bar))
else:
print(color + bar)
_sleep(interval)
System.Clear()
if hide_cursor:
Cursor.ShowCursor()
def Anime() -> None: ...
""" ! developper area ! """
def _anime(text: str, color: list, mode, interval: int):
_stdout.write(mode(color, text))
_stdout.flush()
_sleep(interval)
System.Clear()
def _input() -> str:
global passed
passed = input()
return passed
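
# --- Illustrative sketch (not part of the original package) ---
# A short, finite Fade animation and a loading bar; both clear the terminal
# between frames, so they are meant to be run interactively.
def _example_anime_usage():
    Anime.Fade("loading...", Colors.blue_to_cyan, Colorate.Vertical, time=2, interval=0.02)
    Anime.Bar(20, color=Colors.rainbow, interval=0.05)
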
class Write:
"""
2 functions:
        Print() | print a text to the terminal, coloring it with a fade and a typewriter effect
        Input() | same as Print() but adds an input at the end and returns its value
"""
def Print(text: str, color: list, interval=0.05, hide_cursor: bool = True, end: str = Colors.reset) -> None:
if hide_cursor:
Cursor.HideCursor()
Write._write(text=text, color=color, interval=interval)
_stdout.write(end)
_stdout.flush()
if hide_cursor:
Cursor.ShowCursor()
def Input(text: str, color: list, interval=0.05, hide_cursor: bool = True, input_color: str = Colors.reset, end: str = Colors.reset) -> str:
if hide_cursor:
Cursor.HideCursor()
Write._write(text=text, color=color, interval=interval)
valor = input(input_color)
_stdout.write(end)
_stdout.flush()
if hide_cursor:
Cursor.ShowCursor()
return valor
" ! developper area ! "
def _write(text: str, color, interval: int):
lines = list(text)
if type(color) == list:
while not len(lines) <= len(color):
ncolor = list(color)
for col in ncolor:
color.append(col)
n = 0
for line in lines:
if type(color) == list:
_stdout.write(_MakeColors._makeansi(color[n], line))
else:
_stdout.write(color + line)
_stdout.flush()
_sleep(interval)
if line.strip():
n += 1
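
# --- Illustrative sketch (not part of the original package) ---
# Typewriter-style output with a color fade, followed by a colored prompt,
# as described in the Write docstring above.
def _example_write_usage():
    Write.Print("Hello from the demo\n", Colors.purple_to_blue, interval=0.01)
    return Write.Input("Your name: ", Colors.cyan_to_blue, interval=0.01, input_color=Colors.green)
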
class Center:
"""
    5 functions:
        XCenter()    | center the given text on the X axis
        YCenter()    | center the given text on the Y axis
        Center()     | center the given text on both the X and Y axes
        GroupAlign() | align the given text as a single block
        TextAlign()  | align the given text line by line
    NOTE: the functions of this class can break if the text argument already contains colors
"""
center = 'CENTER'
left = 'LEFT'
right = 'RIGHT'
def XCenter(text: str, spaces: int = None, icon: str = " "):
if spaces is None:
spaces = Center._xspaces(text=text)
return "\n".join((icon * spaces) + text for text in text.splitlines())
def YCenter(text: str, spaces: int = None, icon: str = "\n"):
if spaces is None:
spaces = Center._yspaces(text=text)
return icon * spaces + "\n".join(text.splitlines())
def Center(text: str, xspaces: int = None, yspaces: int = None, xicon: str = " ", yicon: str = "\n") -> str:
if xspaces is None:
xspaces = Center._xspaces(text=text)
if yspaces is None:
yspaces = Center._yspaces(text=text)
text = yicon * yspaces + "\n".join(text.splitlines())
return "\n".join((xicon * xspaces) + text for text in text.splitlines())
def GroupAlign(text: str, align: str = center):
align = align.upper()
if align == Center.center:
return Center.XCenter(text)
elif align == Center.left:
return text
elif align == Center.right:
length = _terminal_size().columns
maxLineSize = max(len(line) for line in text.splitlines())
return '\n'.join((' ' * (length - maxLineSize)) + line for line in text.splitlines())
else:
raise Center.BadAlignment()
def TextAlign(text: str, align: str = center):
align = align.upper()
mlen = max(len(i) for i in text.splitlines())
if align == Center.center:
return "\n".join((' ' * int(mlen/2 - len(lin)/2)) + lin for lin in text.splitlines())
elif align == Center.left:
return text
elif align == Center.right:
ntext = '\n'.join(' ' * (mlen - len(lin)) + lin for lin in text.splitlines())
return ntext
else:
raise Center.BadAlignment()
""" ! developper area ! """
def _xspaces(text: str):
try:
col = _terminal_size().columns
except OSError:
return 0
textl = text.splitlines()
ntextl = max((len(v) for v in textl if v.strip()), default = 0)
return int((col - ntextl) / 2)
def _yspaces(text: str):
try:
lin = _terminal_size().lines
except OSError:
return 0
textl = text.splitlines()
ntextl = len(textl)
return int((lin - ntextl) / 2)
class BadAlignment(Exception):
def __init__(self):
super().__init__("Choose a correct alignment: Center.center / Center.left / Center.right")
class Add:
"""
1 function:
        Add() | allows you to join one text block to another, optionally centering them
"""
def Add(banner1, banner2, spaces=0, center=False):
if center:
split1 = len(banner1.splitlines())
split2 = len(banner2.splitlines())
if split1 > split2:
spaces = (split1 - split2) // 2
elif split2 > split1:
spaces = (split2 - split1) // 2
else:
spaces = 0
if spaces > max(len(banner1.splitlines()), len(banner2.splitlines())):
# raise Banner.MaximumSpaces(spaces)
spaces = max(len(banner1.splitlines()), len(banner2.splitlines()))
ban1 = banner1.splitlines()
ban2 = banner2.splitlines()
ban1count = len(ban1)
ban2count = len(ban2)
size = Add._length(ban1)
ban1 = Add._edit(ban1, size)
ban1line = 0
ban2line = 0
text = ''
for _ in range(spaces):
if ban1count >= ban2count:
ban1data = ban1[ban1line]
ban2data = ''
ban1line += 1
else:
ban1data = " " * size
ban2data = ban2[ban2line]
ban2line += 1
text = text + ban1data + ban2data + '\n'
while ban1line < ban1count or ban2line < ban2count:
ban1data = ban1[ban1line] if ban1line < ban1count else " " * size
ban2data = ban2[ban2line] if ban2line < ban2count else ""
text = text + ban1data + ban2data + '\n'
ban1line += 1
ban2line += 1
return text
""" ! developper area ! """
class MaximumSpaces(Exception):
def __init__(self, spaces: str):
super().__init__(f"Too much spaces [{spaces}].")
def _length(ban1):
bigestline = 0
for line in ban1:
if len(line) > bigestline:
bigestline = len(line)
return bigestline
def _edit(ban1, size):
return [line + (size - len(line)) * " " for line in ban1]
class Banner:
"""
    5 functions:
        Box()        | create a fully customizable box around the given text
        SimpleCube() | create a simple cube with the given text
        DoubleCube() | create a double-lined cube with the given text
        Lines()      | create a text framed by two lines
        Arrow()      | create a custom arrow
"""
def Box(content: str, up_left: str, up_right: str, down_left: str, down_right: str, left_line: str, up_line: str, right_line: str, down_line: str) -> str:
l = 0
lines = content.splitlines()
for a in lines:
if len(a) > l:
l = len(a)
if l % 2 == 1:
l += 1
box = up_left + (up_line * l) + up_right + "\n"
#box += "║ " + (" " * int(l / 2)) + (" " * int(l / 2)) + " ║\n"
for line in lines:
box += left_line + " " + line + (" " * int((l - len(line)))) + " " + right_line + "\n"
box += down_left + (down_line * l) + down_right + "\n"
return box
def SimpleCube(content: str) -> str:
l = 0
lines = content.splitlines()
for a in lines:
if len(a) > l:
l = len(a)
if l % 2 == 1:
l += 1
box = "__" + ("_" * l) + "__\n"
box += "| " + (" " * int(l / 2)) + (" " * int(l / 2)) + " |\n"
for line in lines:
box += "| " + line + (" " * int((l - len(line)))) + " |\n"
box += "|_" + ("_" * l) + "_|\n"
return box
def DoubleCube(content: str) -> str:
return Box.Box(content, "╔═", "═╗", "╚═", "═╝", "║", "═", "║", "═")
def Lines(content: str, color = None, mode = Colorate.Horizontal, line = '═', pepite = 'ቐ') -> str:
l = 1
for c in content.splitlines():
if len(c) > l:
l = len(c)
mode = Colorate.Horizontal if color is not None else (lambda **kw: kw['text'])
box = mode(text = f"─{line*l}{pepite * 2}{line*l}─", color = color)
assembly = box + "\n" + content + "\n" + box
final = ''
for lines in assembly.splitlines():
final += Center.XCenter(lines) + "\n"
return final
def Arrow(icon: str = 'a', size: int = 2, number: int = 2, direction = 'right') -> str:
spaces = ' ' * (size + 1)
_arrow = ''
structure = (size + 2, [size * 2, size * 2])
count = 0
if direction == 'right':
for i in range(structure[1][0]):
line = (structure[0] * icon)
_arrow += (' ' * count) + spaces.join([line] * (number)) + '\n'
count += 2
for i in range(structure[1][0] + 1):
line = (structure[0] * icon)
_arrow += (' ' * count) + spaces.join([line] * (number)) + '\n'
count -= 2
elif direction == 'left':
for i in range(structure[1][0]):
count += 2
for i in range(structure[1][0]):
line = (structure[0] * icon)
_arrow += (' ' * count) + spaces.join([line] * (number)) + '\n'
count -= 2
for i in range(structure[1][0] + 1):
line = (structure[0] * icon)
_arrow += (' ' * count) + spaces.join([line] * (number)) + '\n'
count += 2
return _arrow
Box = Banner
System.Init()
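
# --- Illustrative sketch (not part of the original package) ---
# Framing text with Banner and placing two blocks side by side with Add.
def _example_banner_usage():
    boxed = Banner.SimpleCube("hello\nworld")
    framed = Banner.Lines("a framed headline", color=Colors.red_to_yellow)
    side_by_side = Add.Add(boxed, Banner.DoubleCube("second box"), center=True)
    return boxed, framed, side_by_side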
# /HSTools-0.0.3-py3-none-any.whl/hstools/funcs/describe.py
import sys
import json
import yaml
import argparse
from itertools import groupby
from hstools import hydroshare, log
logger = log.logger
def get_tree(group, items, path):
sep = lambda i: i.split('/', 1)
head = [i for i in items if len(sep(i)) == 2]
tail = [i for i in items if len(sep(i)) == 1]
gv = groupby(sorted(head), lambda i: sep(i)[0])
return group, dict([(i, path+i) for i in tail] + [get_tree(g, [sep(i)[1] for i in v], path+g+'/') for g, v in gv])
def tree_print(d, indent=0, prefix=''):
# file_middle = '├──'
# folder_last = '│ '
folder = '└──'
for key, value in d.items():
print(' ' * indent + f'{prefix} {str(key)}')
if isinstance(value, dict):
next_prefix = folder
tree_print(value, indent+1, next_prefix)
def set_usage(parser):
optionals = []
for option in parser._get_optional_actions():
if len(option.option_strings) > 0:
ostring = f'[{option.option_strings[0]}]'
if '--' in ostring:
# place '--' args at end of usage
optionals.append(ostring)
else:
optionals.insert(0, ostring)
positionals = []
for pos in parser._get_positional_actions():
positionals.append(pos.dest)
parser.usage = f'%(prog)s {" ".join(positionals)} {" ".join(optionals)}'
def add_arguments(parser):
parser.description = long_help()
parser.add_argument('resource_id',
nargs='+', type=str,
help='unique HydroShare resource identifier')
parser.add_argument('-y', '--yaml', default=True, action='store_true',
help='output in yaml format')
parser.add_argument('-j', '--json', default=False, action='store_true',
help='output in json format')
parser.add_argument('-l', '--long', default=False, action='store_true',
help='long output format')
parser.add_argument('-t', '--terms', nargs='+', type=str,
help='specific metadata terms to return, e.g. ' +
'authors, abstract, date_created, etc.', )
parser.add_argument('-v', default=False, action='store_true',
help='verbose output')
set_usage(parser)
def main(args):
if args.v:
log.set_verbose()
# connect to hydroshare
hs = hydroshare.hydroshare()
if hs is None:
        raise Exception('Connection to HydroShare failed')
if args.resource_id:
print('-' * 50)
# loop through input resources
for r in args.resource_id:
try:
meta = hs.getResourceMetadata(r)
meta_dict = {k: v for k, v in vars(meta).items()
if not k.startswith('_')}
if args.terms:
# filter based on specified data types
meta_filtered = {}
for term in args.terms:
if term in meta_dict.keys():
meta_filtered[term] = meta_dict[term]
else:
logger.error(f' - Unknown metadata term {term}')
meta_dict = meta_filtered
# if not verbose, remove some of the metadata
elif not args.long:
short_keys = ['abstract',
'authors',
'creators',
'date_created',
'title']
meta_dict = {k: meta_dict[k] for k in short_keys}
# clean strings
for k, v in meta_dict.items():
                        if isinstance(v, str):
meta_dict[k] = v.replace('\n', '')
# shorten author and creator data
meta_dict['authors'] = ';'.join(meta_dict['authors'])
creator_values = []
for creator in meta_dict['creators']:
creator_values.append(creator['name'])
meta_dict['creators'] = ';'.join(creator_values)
if args.yaml:
class literal(str):
pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str',
data, style='|')
yaml.add_representer(literal, literal_presenter)
v = meta_dict['abstract']
meta_dict['abstract'] = literal(v)
print(yaml.dump(meta_dict))
if args.json:
# query scientific metadata
print(json.dumps(meta_dict,
indent=4,
sort_keys=True))
# organize files for tree printing
urls = []
for file_info in hs.getResourceFiles(r):
rpth = file_info['url'].split('contents/')[-1]
urls.append(rpth)
ftree = dict([get_tree('tree', urls, '')])['tree']
tree_print(ftree)
print('-' * 50)
except Exception as e:
print(e)
def short_help():
return 'Describe metadata and files'
def long_help():
return """Describe the metadata and files of a HydroShare resource. By default a short summary is provided by the "-v" flag can be used for verbose output."""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=long_help())
add_arguments(parser)
args = parser.parse_args()
    main(args)
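
# --- Illustrative sketch (not part of the original module) ---
# Programmatic use of this module's own helpers; the resource id below is a
# made-up placeholder and the term list mirrors the "--terms" example above.
def _example_describe():
    parser = argparse.ArgumentParser(description=long_help())
    add_arguments(parser)
    args = parser.parse_args(['<resource-id>', '--terms', 'abstract', 'authors'])
    main(args)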
# /Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/lorem/cs_CZ/__init__.py
from typing import Dict
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""Implement lorem provider for ``cs_CZ`` locale.
Word list is drawn from the SYN2015.
(representative corpus of contemporary written Czech published in December 2015)
The word list is a list of the 2000 most common lemmas. Abbreviations and first names were removed.
Sources:
- https://wiki.korpus.cz/lib/exe/fetch.php/seznamy:syn2015_lemma_utf8.zip
"""
word_list = (
"a",
"aby",
"adresa",
"Afrika",
"agentura",
"akce",
"aktivita",
"aktivní",
"aktuální",
"ale",
"alespoň",
"alkohol",
"americký",
"Amerika",
"analýza",
"anebo",
"anglický",
"ani",
"aniž",
"ano",
"aplikace",
"architekt",
"areál",
"armáda",
"asi",
"aspoň",
"atmosféra",
"auto",
"autobus",
"autor",
"avšak",
"ačkoli",
"ať",
"až",
"babička",
"banka",
"barevný",
"barva",
"bavit",
"bez",
"bezpečnost",
"bezpečnostní",
"bezpečný",
"blok",
"blízko",
"blízký",
"blížit",
"bod",
"bohatý",
"bohužel",
"boj",
"bojovat",
"bok",
"bolest",
"bota",
"boží",
"branka",
"bratr",
"britský",
"Brno",
"brněnský",
"brzy",
"brána",
"bránit",
"brát",
"budoucnost",
"budoucí",
"budova",
"buď",
"buňka",
"bydlet",
"byt",
"byť",
"bát",
"bílý",
"být",
"bývalý",
"bývat",
"během",
"běžet",
"běžný",
"břeh",
"březen",
"břicho",
"bůh",
"celek",
"celkem",
"celkový",
"celý",
"cena",
"centrum",
"cesta",
"charakter",
"chladný",
"chlap",
"chlapec",
"chodba",
"chodit",
"chovat",
"chování",
"chránit",
"chtít",
"chuť",
"chvilka",
"chvíle",
"chyba",
"chybět",
"chystat",
"chytit",
"chápat",
"cigareta",
"cizí",
"co",
"cokoli",
"cosi",
"což",
"cukr",
"cíl",
"církev",
"cítit",
"daleko",
"další",
"daný",
"datum",
"daň",
"dařit",
"dcera",
"dech",
"den",
"denně",
"deník",
"deset",
"design",
"deska",
"desítka",
"detail",
"devět",
"diskuse",
"displej",
"dispozice",
"divadlo",
"divoký",
"divák",
"dlaň",
"dle",
"dlouho",
"dlouhodobý",
"dlouhý",
"dnes",
"dneska",
"dnešní",
"dno",
"do",
"doba",
"dobrý",
"dobře",
"docela",
"docházet",
"dodat",
"dodnes",
"dodávat",
"dohoda",
"dohromady",
"dojem",
"dojít",
"dokonalý",
"dokonce",
"dokončit",
"doktor",
"dokud",
"dokument",
"dokázat",
"dolar",
"dolů",
"doma",
"domnívat",
"domov",
"domácnost",
"domácí",
"domů",
"dopadnout",
"dopis",
"doplnit",
"doporučovat",
"doprava",
"dopravní",
"dorazit",
"dosahovat",
"doslova",
"dospělý",
"dost",
"dostat",
"dostatečný",
"dostatečně",
"dostupný",
"dostávat",
"dosud",
"dosáhnout",
"dotace",
"dotknout",
"doufat",
"dovnitř",
"dovolená",
"dovolit",
"dovést",
"dozvědět",
"dočkat",
"drahý",
"drobný",
"druh",
"druhý",
"dráha",
"držet",
"duben",
"duch",
"duše",
"dva",
"dvacet",
"dvakrát",
"dvanáct",
"dveře",
"dvůr",
"dále",
"dáma",
"dát",
"dávat",
"dávka",
"dávno",
"dávný",
"délka",
"déšť",
"díky",
"díl",
"dílo",
"díra",
"dít",
"dítě",
"dívat",
"dívka",
"dějiny",
"děkovat",
"dělat",
"dětský",
"dětství",
"dřevo",
"dřevěný",
"důkaz",
"důležitý",
"dům",
"důsledek",
"důvod",
"ekonomický",
"ekonomika",
"elektrický",
"energetický",
"energie",
"euro",
"Evropa",
"evropský",
"existence",
"existovat",
"fakt",
"faktor",
"fakulta",
"fanoušek",
"festival",
"film",
"filmový",
"finance",
"finanční",
"firma",
"fond",
"forma",
"fotbal",
"fotbalový",
"fotka",
"fotografie",
"Francie",
"francouzský",
"fungovat",
"funkce",
"fyzický",
"fáze",
"generace",
"gól",
"hala",
"herec",
"hezký",
"historický",
"historie",
"hladina",
"hlas",
"hlava",
"hlavní",
"hlavně",
"hledat",
"hledisko",
"hledět",
"hluboký",
"hmota",
"hmotnost",
"hned",
"hnutí",
"hnědý",
"hodina",
"hodit",
"hodlat",
"hodnocení",
"hodnota",
"hodně",
"holka",
"hora",
"horký",
"horní",
"hospodářský",
"host",
"hotel",
"hotový",
"hovořit",
"hra",
"hrad",
"hranice",
"hrdina",
"hrozit",
"hrozně",
"hrát",
"hráč",
"hudba",
"hudební",
"hvězda",
"hřiště",
"i",
"ideální",
"informace",
"informační",
"informovat",
"instituce",
"internet",
"internetový",
"investice",
"italský",
"jak",
"jakmile",
"jako",
"jaký",
"jakýkoli",
"jakýsi",
"jaro",
"jasný",
"jasně",
"jazyk",
"jeden",
"jedinec",
"jediný",
"jednak",
"jednat",
"jednoduchý",
"jednoduše",
"jednotka",
"jednotlivý",
"jednou",
"jednání",
"jeho",
"jejich",
"její",
"jelikož",
"jemný",
"jen",
"jenom",
"jenž",
"jenže",
"jestli",
"jestliže",
"jet",
"jev",
"jezdit",
"ještě",
"jinak",
"jinde",
"jiný",
"jistota",
"jistý",
"jistě",
"již",
"jižní",
"jmenovat",
"jméno",
"jo",
"já",
"jádro",
"jídlo",
"jíst",
"jít",
"jízda",
"k",
"kam",
"kamarád",
"kamenný",
"kamera",
"kancelář",
"kapacita",
"kapela",
"kapitola",
"kapitán",
"kapsa",
"kariéra",
"karta",
"kategorie",
"každý",
"kde",
"kdo",
"kdy",
"kdyby",
"kdykoli",
"kdysi",
"když",
"kilometr",
"klasický",
"klid",
"klidný",
"klidně",
"klient",
"klub",
"kluk",
"klást",
"klíč",
"klíčový",
"kniha",
"knihovna",
"knížka",
"kolega",
"kolem",
"koleno",
"kolik",
"kolo",
"kombinace",
"komise",
"komora",
"komunikace",
"konat",
"koncert",
"konec",
"konečný",
"konečně",
"konkrétní",
"konstrukce",
"kontakt",
"kontrola",
"končit",
"kopec",
"koruna",
"kost",
"kostel",
"koupit",
"kousek",
"kočka",
"košile",
"kraj",
"krajina",
"krajský",
"krev",
"krize",
"krk",
"krok",
"kromě",
"kruh",
"král",
"krása",
"krásný",
"krátce",
"krátký",
"který",
"kuchyně",
"kultura",
"kulturní",
"kurs",
"kus",
"kvalita",
"kvalitní",
"květ",
"květen",
"kvůli",
"kámen",
"káva",
"křeslo",
"křičet",
"křídlo",
"kůň",
"kůže",
"led",
"leden",
"lehce",
"lehký",
"les",
"letadlo",
"letní",
"letos",
"letošní",
"levný",
"levý",
"ležet",
"lidový",
"lidský",
"liga",
"linka",
"list",
"listopad",
"literatura",
"lišit",
"lokalita",
"Londýn",
"loď",
"loňský",
"lze",
"láska",
"látka",
"lék",
"lékař",
"léto",
"léčba",
"líbit",
"majetek",
"majitel",
"malý",
"maminka",
"manažer",
"manžel",
"manželka",
"mapa",
"maso",
"materiál",
"matka",
"metoda",
"metr",
"mezi",
"mezinárodní",
"miliarda",
"milimetr",
"milión",
"milovat",
"milý",
"mimo",
"ministerstvo",
"ministr",
"minulost",
"minulý",
"minuta",
"mistr",
"mladík",
"mladý",
"mluvit",
"mluvčí",
"mléko",
"mnohem",
"mnoho",
"mnohý",
"množství",
"mobil",
"mobilní",
"moc",
"moci",
"model",
"moderní",
"modrý",
"moment",
"Morava",
"most",
"motor",
"mozek",
"moře",
"možnost",
"možná",
"možný",
"mrtvý",
"muset",
"muzeum",
"muž",
"my",
"mysl",
"myslet",
"myšlenka",
"málo",
"máma",
"médium",
"míra",
"mírně",
"místnost",
"místní",
"místo",
"mít",
"měnit",
"město",
"městský",
"měsíc",
"můj",
"na",
"nabídka",
"nabídnout",
"nabízet",
"nacházet",
"nad",
"nadále",
"naděje",
"nahoru",
"nahradit",
"najednou",
"najít",
"nakonec",
"nalézt",
"naopak",
"napadnout",
"naposledy",
"naprosto",
"napsat",
"napětí",
"například",
"narazit",
"narodit",
"nastat",
"nastoupit",
"natolik",
"naučit",
"navrhnout",
"navzdory",
"navíc",
"navštívit",
"nazývat",
"naštěstí",
"ne",
"nebe",
"nebezpečí",
"nebo",
"neboť",
"nechat",
"nechávat",
"nedostatek",
"nedávno",
"neděle",
"nehoda",
"nejen",
"nejprve",
"nemoc",
"nemocnice",
"nemocný",
"nepřítel",
"neustále",
"nezbytný",
"než",
"nic",
"nicméně",
"nijak",
"nikdo",
"nikdy",
"nikoli",
"no",
"noc",
"noha",
"norma",
"normální",
"nos",
"nosit",
"novinka",
"noviny",
"novinář",
"nový",
"nově",
"noční",
"nutit",
"nutný",
"nyní",
"nábytek",
"nádherný",
"náhle",
"náhodou",
"náklad",
"nákup",
"nálada",
"náměstí",
"nápad",
"národ",
"národní",
"nárok",
"náročný",
"následek",
"následně",
"následovat",
"následující",
"nástroj",
"návrat",
"návrh",
"návštěva",
"návštěvník",
"název",
"názor",
"náš",
"nést",
"nízký",
"nýbrž",
"něco",
"nějak",
"nějaký",
"někde",
"někdo",
"někdy",
"několik",
"několikrát",
"některý",
"Němec",
"Německo",
"německý",
"o",
"oba",
"obava",
"obchod",
"obchodní",
"období",
"obec",
"obecný",
"obecně",
"objekt",
"objem",
"objevit",
"objevovat",
"oblast",
"oblečení",
"obličej",
"oblíbený",
"obor",
"obr",
"obrana",
"obraz",
"obrovský",
"obrátit",
"obrázek",
"obsah",
"obsahovat",
"obvod",
"obvykle",
"obvyklý",
"obyvatel",
"obyčejný",
"občan",
"občanský",
"občas",
"oběť",
"ochrana",
"ocitnout",
"od",
"odborník",
"odborný",
"odchod",
"odcházet",
"oddělení",
"odejít",
"odhalit",
"odjet",
"odkud",
"odlišný",
"odmítat",
"odmítnout",
"odpoledne",
"odpor",
"odpovídat",
"odpovědět",
"odpověď",
"oheň",
"ohled",
"okamžik",
"okamžitě",
"okno",
"oko",
"okolnost",
"okolní",
"okolo",
"okolí",
"okraj",
"olej",
"omezený",
"on",
"onemocnění",
"onen",
"oni",
"opakovat",
"opatření",
"operace",
"operační",
"oprava",
"opravdu",
"oproti",
"opustit",
"opět",
"organizace",
"orgán",
"osm",
"osoba",
"osobnost",
"osobní",
"osobně",
"ostatní",
"ostatně",
"Ostrava",
"ostrov",
"ostrý",
"osud",
"otec",
"otevřený",
"otevřít",
"otočit",
"otázka",
"ovlivnit",
"ovšem",
"označit",
"označovat",
"oznámit",
"ozvat",
"očekávat",
"pacient",
"padat",
"padesát",
"padnout",
"pak",
"pamatovat",
"památka",
"paměť",
"pan",
"paní",
"papír",
"parametr",
"park",
"partner",
"patnáct",
"patro",
"patřit",
"paže",
"peníze",
"pes",
"pevný",
"pevně",
"pivo",
"planeta",
"platit",
"plný",
"plně",
"plocha",
"plyn",
"Plzeň",
"plán",
"plánovat",
"po",
"pobyt",
"pochopit",
"pochopitelně",
"pocházet",
"pocit",
"pod",
"podat",
"podařit",
"podepsat",
"podivný",
"podlaha",
"podle",
"podmínka",
"podnik",
"podoba",
"podobný",
"podobně",
"podpora",
"podporovat",
"podpořit",
"podstata",
"podstatný",
"podzim",
"podávat",
"podíl",
"podílet",
"podívat",
"pohled",
"pohlédnout",
"pohyb",
"pohybovat",
"pojem",
"pokaždé",
"pokoj",
"pokoušet",
"pokračovat",
"pokud",
"pokus",
"pokusit",
"pole",
"policejní",
"policie",
"policista",
"politický",
"politik",
"politika",
"poloha",
"polovina",
"položit",
"pomalu",
"pomoc",
"pomoci",
"pomocí",
"pomyslet",
"pomáhat",
"poměr",
"poměrně",
"poněkud",
"popis",
"popisovat",
"poprvé",
"popsat",
"populace",
"poradit",
"posadit",
"poskytnout",
"poskytovat",
"poslat",
"poslední",
"poslouchat",
"postava",
"postavení",
"postavit",
"postel",
"postoj",
"postup",
"postupně",
"potkat",
"potom",
"potravina",
"potvrdit",
"poté",
"potíž",
"potřeba",
"potřebný",
"potřebovat",
"pouhý",
"pouze",
"použití",
"použít",
"používat",
"povaha",
"považovat",
"povinnost",
"povrch",
"povést",
"povídat",
"povědět",
"pozdní",
"pozdě",
"pozemek",
"pozice",
"pozitivní",
"poznamenat",
"poznat",
"poznámka",
"pozor",
"pozornost",
"pozorovat",
"pozvat",
"počasí",
"počet",
"počkat",
"počátek",
"počítat",
"počítač",
"pořád",
"pořádek",
"pořádně",
"pořídit",
"požadavek",
"požádat",
"prach",
"pracovat",
"pracovní",
"pracovník",
"Praha",
"prakticky",
"praktický",
"pravda",
"pravděpodobně",
"pravidelný",
"pravidelně",
"pravidlo",
"pravý",
"praxe",
"pražský",
"premiér",
"prezident",
"princip",
"pro",
"problém",
"probudit",
"probíhat",
"proběhnout",
"procento",
"proces",
"procházet",
"prodat",
"prodej",
"produkce",
"produkt",
"prodávat",
"profesor",
"program",
"prohlásit",
"projekt",
"projev",
"projevit",
"projevovat",
"projít",
"promluvit",
"proměnit",
"prosinec",
"prosit",
"prostor",
"prostě",
"prostředek",
"prostřednictvím",
"prostředí",
"proti",
"proto",
"protože",
"proud",
"provedení",
"provoz",
"provádět",
"provést",
"prozradit",
"proč",
"prst",
"prvek",
"první",
"pryč",
"práce",
"právní",
"právo",
"právě",
"prázdný",
"prý",
"průběh",
"průmysl",
"průměr",
"průměrný",
"psát",
"pták",
"ptát",
"pustit",
"pád",
"pán",
"pár",
"pátek",
"péče",
"píseň",
"pít",
"pěkný",
"pěkně",
"pět",
"přece",
"před",
"předchozí",
"předem",
"především",
"předmět",
"přednost",
"přední",
"předpoklad",
"předpokládat",
"předseda",
"představa",
"představení",
"představit",
"představovat",
"předtím",
"přejít",
"překvapit",
"přemýšlet",
"přes",
"přesný",
"přesně",
"přestat",
"přesto",
"přestože",
"přesvědčit",
"převzít",
"přečíst",
"přežít",
"při",
"přibližně",
"přiblížit",
"přicházet",
"přidat",
"přijet",
"přijmout",
"přijít",
"přikývnout",
"přinášet",
"přinést",
"připadat",
"připojit",
"připomenout",
"připomínat",
"připravený",
"připravit",
"připravovat",
"přirozený",
"přitom",
"přivést",
"přiznat",
"přičemž",
"přání",
"přát",
"příběh",
"příjem",
"příjemný",
"příklad",
"příležitost",
"příliš",
"přímo",
"přímý",
"případ",
"případný",
"případně",
"příprava",
"příroda",
"přírodní",
"příslušný",
"příspěvek",
"přístroj",
"přístup",
"přítel",
"přítomnost",
"přítomný",
"příčina",
"příští",
"půda",
"půl",
"působení",
"působit",
"původ",
"původní",
"původně",
"rada",
"radnice",
"radost",
"rameno",
"reagovat",
"reakce",
"realita",
"realizace",
"region",
"regionální",
"rekonstrukce",
"republika",
"restaurace",
"ret",
"reálný",
"režim",
"režisér",
"riziko",
"rodina",
"rodinný",
"rodič",
"roh",
"rok",
"role",
"román",
"rostlina",
"rovnice",
"rovnou",
"rovněž",
"rozdíl",
"rozdělit",
"rozhodnout",
"rozhodnutí",
"rozhodně",
"rozhodovat",
"rozhovor",
"rozměr",
"rozpočet",
"rozsah",
"rozsáhlý",
"rozumět",
"rozvoj",
"rozšířit",
"ročník",
"ruka",
"Rusko",
"ruský",
"ryba",
"rychle",
"rychlost",
"rychlý",
"rád",
"rámec",
"rána",
"ráno",
"růst",
"různý",
"s",
"samostatný",
"samotný",
"samozřejmě",
"samý",
"sbor",
"sbírka",
"schod",
"schopnost",
"schopný",
"scéna",
"sdružení",
"sdělit",
"se",
"sedm",
"sednout",
"sedět",
"sejít",
"sem",
"sen",
"seriál",
"sestra",
"setkat",
"setkání",
"severní",
"seznam",
"seznámit",
"sezona",
"sice",
"signál",
"silnice",
"silný",
"silně",
"situace",
"skladba",
"sklo",
"skončit",
"skoro",
"skrývat",
"skupina",
"skutečnost",
"skutečný",
"skutečně",
"skvělý",
"skála",
"slabý",
"slavný",
"sledovat",
"slečna",
"sloužit",
"Slovensko",
"slovenský",
"slovo",
"složitý",
"složka",
"slunce",
"sluneční",
"služba",
"slyšet",
"slza",
"smlouva",
"smrt",
"smysl",
"smát",
"smích",
"směr",
"smět",
"snad",
"snadno",
"snadný",
"snaha",
"snažit",
"sníh",
"snímek",
"snížit",
"sobota",
"sociální",
"sotva",
"soubor",
"soud",
"souhlasit",
"soukromý",
"soupeř",
"soused",
"soustava",
"soustředit",
"soutěž",
"souviset",
"souvislost",
"současnost",
"současný",
"současně",
"součást",
"spadnout",
"spatřit",
"specifický",
"speciální",
"spisovatel",
"splnit",
"spodní",
"spojení",
"spojený",
"spojit",
"spokojený",
"společenský",
"společnost",
"společný",
"společně",
"spolu",
"spolupráce",
"spor",
"sport",
"sportovní",
"spotřeba",
"spousta",
"spočívat",
"správa",
"správný",
"správně",
"spustit",
"spánek",
"spát",
"spíš",
"srdce",
"srovnání",
"srpen",
"stanice",
"stanovit",
"starat",
"starost",
"starosta",
"starý",
"stav",
"stavba",
"stavební",
"stavět",
"stačit",
"stejný",
"stejně",
"stihnout",
"sto",
"století",
"stopa",
"stovka",
"strach",
"strana",
"strategie",
"strašně",
"stroj",
"strom",
"struktura",
"stránka",
"strávit",
"student",
"studený",
"studie",
"studium",
"studovat",
"stupeň",
"styl",
"stáhnout",
"stále",
"stát",
"státní",
"stávat",
"stín",
"stěna",
"střecha",
"střední",
"stůl",
"suchý",
"svatý",
"svaz",
"svoboda",
"svobodný",
"svět",
"světlo",
"světový",
"svůj",
"symbol",
"syn",
"systém",
"sál",
"sám",
"série",
"síla",
"síť",
"sůl",
"tabulka",
"tady",
"tajemství",
"tajný",
"tak",
"takhle",
"takový",
"takto",
"taky",
"takzvaný",
"také",
"takže",
"tam",
"technický",
"technika",
"technologie",
"teda",
"tedy",
"tehdejší",
"tehdy",
"telefon",
"televize",
"televizní",
"temný",
"ten",
"tenhle",
"tenkrát",
"tento",
"tentokrát",
"tentýž",
"teorie",
"teplo",
"teplota",
"teplý",
"teprve",
"termín",
"test",
"text",
"teď",
"ticho",
"tichý",
"tisíc",
"titul",
"tiše",
"tlak",
"tlačítko",
"tma",
"tmavý",
"to",
"tolik",
"totiž",
"touha",
"toužit",
"tradice",
"tradiční",
"trasa",
"trať",
"trend",
"trenér",
"trest",
"trh",
"trochu",
"trpět",
"trvat",
"tráva",
"tu",
"turnaj",
"tušit",
"tvar",
"tvorba",
"tvořit",
"tvrdit",
"tvrdý",
"tvář",
"tvůj",
"ty",
"typ",
"typický",
"tábor",
"táhnout",
"táta",
"téma",
"téměř",
"též",
"tón",
"týden",
"týkat",
"tým",
"týž",
"tělo",
"těsně",
"těšit",
"těžko",
"těžký",
"třeba",
"třetina",
"třetí",
"tři",
"třicet",
"třída",
"u",
"ucho",
"udržet",
"udržovat",
"událost",
"udělat",
"ukazovat",
"ukázat",
"ulice",
"uložit",
"umožnit",
"umožňovat",
"umístit",
"umělec",
"umělecký",
"umělý",
"umění",
"umět",
"unie",
"univerzita",
"upozornit",
"uprostřed",
"určený",
"určit",
"určitý",
"určitě",
"uskutečnit",
"usmát",
"usmívat",
"utkání",
"utéci",
"uvažovat",
"uvedený",
"uvidět",
"uvnitř",
"uvádět",
"uvést",
"uvědomit",
"uvědomovat",
"uzavřít",
"učinit",
"učit",
"učitel",
"už",
"uživatel",
"užívat",
"v",
"vadit",
"varianta",
"vazba",
"vedení",
"vedle",
"vedoucí",
"vejít",
"velice",
"velikost",
"veliký",
"velký",
"velmi",
"ven",
"venku",
"verze",
"vesmír",
"vesnice",
"večer",
"večeře",
"veřejnost",
"veřejný",
"veškerý",
"vhodný",
"viditelný",
"vidět",
"vina",
"viset",
"viz",
"vlak",
"vlas",
"vlastnost",
"vlastní",
"vlastně",
"vliv",
"vlna",
"vloni",
"vláda",
"vnitřní",
"vnímat",
"vnější",
"voda",
"vodní",
"vojenský",
"voják",
"volat",
"volba",
"volit",
"volný",
"vozidlo",
"vracet",
"vrchol",
"vrstva",
"vrátit",
"vstoupit",
"vstup",
"vstát",
"vteřina",
"vy",
"vybavit",
"vybraný",
"vybrat",
"vybírat",
"vycházet",
"vydat",
"vydržet",
"vydání",
"vydávat",
"vyhnout",
"vyhrát",
"vyjádřit",
"vyjít",
"vypadat",
"vyprávět",
"vyrazit",
"vyrábět",
"vyskytovat",
"vysoko",
"vysoký",
"vystoupit",
"vystupovat",
"vysvětlit",
"vysvětlovat",
"vytvořit",
"vytvářet",
"vytáhnout",
"využití",
"využít",
"využívat",
"vyvolat",
"vyzkoušet",
"vyřešit",
"vyžadovat",
"vzduch",
"vzdálenost",
"vzdálený",
"vzdát",
"vzdělání",
"vzdělávání",
"vzhledem",
"vznik",
"vznikat",
"vzniknout",
"vzor",
"vzpomenout",
"vzpomínat",
"vzpomínka",
"vztah",
"vzájemný",
"vzít",
"váha",
"válka",
"Vánoce",
"vánoční",
"váš",
"vážný",
"vážně",
"vést",
"víc",
"více",
"víkend",
"víno",
"víra",
"vítr",
"vítěz",
"vítězství",
"výbor",
"výběr",
"východ",
"východní",
"výchova",
"výhoda",
"výjimka",
"výkon",
"výměna",
"výraz",
"výrazný",
"výrazně",
"výroba",
"výrobce",
"výrobek",
"výsledek",
"výstava",
"výstavba",
"vývoj",
"výzkum",
"význam",
"významný",
"výzva",
"výše",
"výška",
"včera",
"včetně",
"věc",
"věda",
"vědec",
"vědecký",
"vědomí",
"vědět",
"věk",
"věnovat",
"věta",
"větev",
"většina",
"většinou",
"vězení",
"věřit",
"věž",
"však",
"všechen",
"všimnout",
"všude",
"vůbec",
"vůle",
"vůně",
"vůz",
"vůči",
"vždy",
"vždycky",
"vždyť",
"z",
"za",
"zabránit",
"zabít",
"zabývat",
"zachovat",
"zachránit",
"zadní",
"zahrada",
"zahraniční",
"zahraničí",
"zahájit",
"zajistit",
"zajímat",
"zajímavý",
"zajít",
"zakázka",
"založit",
"zamířit",
"zaměstnanec",
"zaměřit",
"zaplatit",
"zapomenout",
"zas",
"zase",
"zasmát",
"zastavit",
"zasáhnout",
"zatím",
"zatímco",
"zaujmout",
"zavolat",
"zavést",
"zavřít",
"zaznamenat",
"začátek",
"začínat",
"začít",
"zařízení",
"zažít",
"zbavit",
"zboží",
"zbraň",
"zbytek",
"zbýt",
"zbývat",
"zcela",
"zda",
"zde",
"zdravotní",
"zdraví",
"zdravý",
"zdroj",
"zdát",
"zejména",
"zelený",
"země",
"zemřít",
"zeptat",
"zeď",
"zhruba",
"zima",
"zimní",
"zisk",
"zjistit",
"zkouška",
"zkrátka",
"zkusit",
"zkušenost",
"zlato",
"zlatý",
"zlý",
"zmizet",
"zmínit",
"zmíněný",
"změna",
"změnit",
"znak",
"znalost",
"znamenat",
"značka",
"značný",
"znovu",
"známý",
"znát",
"znít",
"zpravidla",
"zpráva",
"zpátky",
"zpívat",
"zpět",
"způsob",
"způsobit",
"zrovna",
"ztratit",
"ztrácet",
"ztráta",
"zub",
"zvednout",
"zvládnout",
"zvláštní",
"zvláště",
"zvlášť",
"zvolit",
"zvuk",
"zvyšovat",
"zvíře",
"zvýšení",
"zvýšit",
"záda",
"zájem",
"zákazník",
"základ",
"základní",
"zákon",
"záležet",
"záležitost",
"zámek",
"západ",
"západní",
"zápas",
"zároveň",
"zásada",
"zásadní",
"zásah",
"zástupce",
"závislost",
"závislý",
"závod",
"závěr",
"záznam",
"září",
"zážitek",
"získat",
"zítra",
"zřejmě",
"zůstat",
"zůstávat",
"údaj",
"úkol",
"únor",
"úplný",
"úplně",
"úprava",
"úroveň",
"úsek",
"úsměv",
"úspěch",
"úspěšný",
"ústa",
"ústav",
"útok",
"útočník",
"úvaha",
"území",
"úzký",
"účast",
"účastník",
"účel",
"účet",
"účinek",
"úřad",
"úžasný",
"čaj",
"čas",
"časopis",
"časový",
"často",
"častý",
"Čech",
"Čechy",
"čekat",
"čelo",
"černý",
"čerstvý",
"červen",
"červenec",
"červený",
"Česko",
"český",
"či",
"čin",
"činit",
"činnost",
"čistý",
"člen",
"člověk",
"článek",
"čtenář",
"čtvrtý",
"čtyři",
"část",
"částice",
"částka",
"Čína",
"čínský",
"číslo",
"číst",
"řada",
"ředitel",
"řeka",
"řeč",
"řešení",
"řešit",
"řidič",
"řád",
"říci",
"řídit",
"říjen",
"říkat",
"řízení",
"šance",
"šaty",
"šedý",
"šest",
"široký",
"škoda",
"škola",
"školní",
"špatný",
"špatně",
"štěstí",
"šéf",
"šťastný",
"že",
"žena",
"ženský",
"židle",
"život",
"životní",
"živý",
"žlutý",
"žádat",
"žádný",
"žádost",
"žák",
"žít",
)
    parts_of_speech: Dict[str, tuple] = {}
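
# --- Illustrative sketch (not part of the original provider) ---
# Generating Czech lorem text from this word list through the standard Faker
# interface.
def _example_cs_lorem():
    from faker import Faker
    fake = Faker('cs_CZ')
    return fake.word(), fake.sentence(nb_words=8), fake.paragraph(nb_sentences=3)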
# /GRR-M2Crypto-0.22.6.tar.gz/GRR-M2Crypto-0.22.6/M2Crypto/httpslib.py
import string, sys
import socket
from urlparse import urlsplit, urlunsplit
import base64
from httplib import *
from httplib import HTTPS_PORT # This is not imported with just '*'
import SSL
class HTTPSConnection(HTTPConnection):
"""
This class allows communication via SSL using M2Crypto.
"""
default_port = HTTPS_PORT
def __init__(self, host, port=None, strict=None, **ssl):
self.session = None
keys = ssl.keys()
try:
keys.remove('key_file')
except ValueError:
pass
try:
keys.remove('cert_file')
except ValueError:
pass
try:
keys.remove('ssl_context')
except ValueError:
pass
if keys:
raise ValueError('unknown keyword argument')
try:
self.ssl_ctx = ssl['ssl_context']
assert isinstance(self.ssl_ctx, SSL.Context), self.ssl_ctx
except KeyError:
self.ssl_ctx = SSL.Context('sslv23')
HTTPConnection.__init__(self, host, port, strict)
def connect(self):
self.sock = SSL.Connection(self.ssl_ctx)
if self.session:
self.sock.set_session(self.session)
self.sock.connect((self.host, self.port))
def close(self):
# This kludges around line 545 of httplib.py,
# which closes the connection in this object;
# the connection remains open in the response
# object.
#
# M2Crypto doesn't close-here-keep-open-there,
# so, in effect, we don't close until the whole
# business is over and gc kicks in.
#
# XXX Long-running callers beware leakage.
#
# XXX 05-Jan-2002: This module works with Python 2.2,
# XXX but I've not investigated if the above conditions
# XXX remain.
pass
def get_session(self):
return self.sock.get_session()
def set_session(self, session):
self.session = session
class HTTPS(HTTP):
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, strict=None, **ssl):
HTTP.__init__(self, host, port, strict)
try:
self.ssl_ctx = ssl['ssl_context']
except KeyError:
self.ssl_ctx = SSL.Context('sslv23')
assert isinstance(self._conn, HTTPSConnection)
self._conn.ssl_ctx = self.ssl_ctx
class ProxyHTTPSConnection(HTTPSConnection):
"""
An HTTPS Connection that uses a proxy and the CONNECT request.
When the connection is initiated, CONNECT is first sent to the proxy (along
with authorization headers, if supplied). If successful, an SSL connection
will be established over the socket through the proxy and to the target
host.
Finally, the actual request is sent over the SSL connection tunneling
through the proxy.
"""
_ports = {'http' : 80, 'https' : 443}
_AUTH_HEADER = "Proxy-Authorization"
_UA_HEADER = "User-Agent"
def __init__(self, host, port=None, strict=None, username=None,
password=None, **ssl):
"""
Create the ProxyHTTPSConnection object.
host and port are the hostname and port number of the proxy server.
"""
HTTPSConnection.__init__(self, host, port, strict, **ssl)
self._username = username
self._password = password
self._proxy_auth = None
self._proxy_UA = None
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
#putrequest is called before connect, so can interpret url and get
#real host/port to be used to make CONNECT request to proxy
proto, netloc, path, query, fragment = urlsplit(url)
if not proto:
raise ValueError, "unknown URL type: %s" % url
#get host & port
try:
username_password, host_port = netloc.split('@')
except ValueError:
host_port = netloc
try:
host, port = host_port.split(':')
except ValueError:
host = host_port
#try to get port from proto
try:
port = self._ports[proto]
except KeyError:
raise ValueError, "unknown protocol for: %s" % url
self._real_host = host
self._real_port = int(port)
rest = urlunsplit((None, None, path, query, fragment))
if sys.version_info < (2,4):
HTTPSConnection.putrequest(self, method, rest, skip_host)
else:
HTTPSConnection.putrequest(self, method, rest, skip_host, skip_accept_encoding)
def putheader(self, header, value):
# Store the auth header if passed in.
if header.lower() == self._UA_HEADER.lower():
self._proxy_UA = value
if header.lower() == self._AUTH_HEADER.lower():
self._proxy_auth = value
else:
HTTPSConnection.putheader(self, header, value)
def endheaders(self):
# We've received all of the headers. Use the supplied username
# and password for authorization, possibly overriding the authstring
# supplied in the headers.
if not self._proxy_auth:
self._proxy_auth = self._encode_auth()
HTTPSConnection.endheaders(self)
def connect(self):
HTTPConnection.connect(self)
#send proxy CONNECT request
self.sock.sendall(self._get_connect_msg())
response = HTTPResponse(self.sock)
response.begin()
code = response.status
if code != 200:
#proxy returned an error; abort the connection and raise an exception
self.close()
raise socket.error, "Proxy connection failed: %d" % code
self._start_ssl()
def _get_connect_msg(self):
""" Return an HTTP CONNECT request to send to the proxy. """
msg = "CONNECT %s:%d HTTP/1.1\r\n" % (self._real_host, self._real_port)
msg = msg + "Host: %s:%d\r\n" % (self._real_host, self._real_port)
if self._proxy_UA:
msg = msg + "%s: %s\r\n" % (self._UA_HEADER, self._proxy_UA)
if self._proxy_auth:
msg = msg + "%s: %s\r\n" % (self._AUTH_HEADER, self._proxy_auth)
msg = msg + "\r\n"
return msg
def _start_ssl(self):
""" Make this connection's socket SSL-aware. """
self.sock = SSL.Connection(self.ssl_ctx, self.sock)
self.sock.setup_ssl()
self.sock.set_connect_state()
self.sock.connect_ssl()
def _encode_auth(self):
""" Encode the username and password for use in the auth header. """
if not (self._username and self._password):
return None
# Authenticated proxy
userpass = "%s:%s" % (self._username, self._password)
enc_userpass = base64.encodestring(userpass).replace("\n", "")
return "Basic %s" % enc_userpass | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/ekko-lightbox/ekko-lightbox.js | +function ($) {
'use strict';
var _createClass = (function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ('value' in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; })();
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError('Cannot call a class as a function'); } }
var Lightbox = (function ($) {
var NAME = 'ekkoLightbox';
var JQUERY_NO_CONFLICT = $.fn[NAME];
var Default = {
title: '',
footer: '',
maxWidth: 9999,
maxHeight: 9999,
showArrows: true, //display the left / right arrows or not
wrapping: true, //if true, gallery loops infinitely
type: null, //force the lightbox into image / youtube mode. if null, or not image|youtube|vimeo; detect it
alwaysShowClose: false, //always show the close button, even if there is no title
loadingMessage: '<div class="ekko-lightbox-loader"><div><div></div><div></div></div></div>', // http://tobiasahlin.com/spinkit/
leftArrow: '<span>❮</span>',
rightArrow: '<span>❯</span>',
strings: {
close: 'Close',
fail: 'Failed to load image:',
type: 'Could not detect remote target type. Force the type using data-type'
},
doc: document, // if in an iframe can specify top.document
onShow: function onShow() {},
onShown: function onShown() {},
onHide: function onHide() {},
onHidden: function onHidden() {},
onNavigate: function onNavigate() {},
onContentLoaded: function onContentLoaded() {}
};
var Lightbox = (function () {
_createClass(Lightbox, null, [{
key: 'Default',
/**
Class properties:
_$element: null -> the <a> element currently being displayed
_$modal: The bootstrap modal generated
_$modalDialog: The .modal-dialog
_$modalContent: The .modal-content
_$modalBody: The .modal-body
_$modalHeader: The .modal-header
_$modalFooter: The .modal-footer
_$lightboxContainerOne: Container of the first lightbox element
_$lightboxContainerTwo: Container of the second lightbox element
_$lightboxBody: First element in the container
_$modalArrows: The overlayed arrows container
_$galleryItems: Other <a>'s available for this gallery
_galleryName: Name of the current data('gallery') showing
_galleryIndex: The current index of the _$galleryItems being shown
_config: {} the options for the modal
_modalId: unique id for the current lightbox
_padding / _border: CSS properties for the modal container; these are used to calculate the available space for the content
*/
get: function get() {
return Default;
}
}]);
function Lightbox($element, config) {
var _this = this;
_classCallCheck(this, Lightbox);
this._config = $.extend({}, Default, config);
this._$modalArrows = null;
this._galleryIndex = 0;
this._galleryName = null;
this._padding = null;
this._border = null;
this._titleIsShown = false;
this._footerIsShown = false;
this._wantedWidth = 0;
this._wantedHeight = 0;
this._touchstartX = 0;
this._touchendX = 0;
this._modalId = 'ekkoLightbox-' + Math.floor(Math.random() * 1000 + 1);
this._$element = $element instanceof jQuery ? $element : $($element);
this._isBootstrap3 = $.fn.modal.Constructor.VERSION[0] == 3;
var h4 = '<h4 class="modal-title">' + (this._config.title || " ") + '</h4>';
var btn = '<button type="button" class="close" data-dismiss="modal" aria-label="' + this._config.strings.close + '"><span aria-hidden="true">×</span></button>';
var header = '<div class="modal-header' + (this._config.title || this._config.alwaysShowClose ? '' : ' hide') + '">' + (this._isBootstrap3 ? btn + h4 : h4 + btn) + '</div>';
var footer = '<div class="modal-footer' + (this._config.footer ? '' : ' hide') + '">' + (this._config.footer || " ") + '</div>';
var body = '<div class="modal-body"><div class="ekko-lightbox-container"><div class="ekko-lightbox-item fade in show"></div><div class="ekko-lightbox-item fade"></div></div></div>';
var dialog = '<div class="modal-dialog" role="document"><div class="modal-content">' + header + body + footer + '</div></div>';
$(this._config.doc.body).append('<div id="' + this._modalId + '" class="ekko-lightbox modal fade" tabindex="-1" role="dialog" aria-hidden="true">' + dialog + '</div>');
this._$modal = $('#' + this._modalId, this._config.doc);
this._$modalDialog = this._$modal.find('.modal-dialog').first();
this._$modalContent = this._$modal.find('.modal-content').first();
this._$modalBody = this._$modal.find('.modal-body').first();
this._$modalHeader = this._$modal.find('.modal-header').first();
this._$modalFooter = this._$modal.find('.modal-footer').first();
this._$lightboxContainer = this._$modalBody.find('.ekko-lightbox-container').first();
this._$lightboxBodyOne = this._$lightboxContainer.find('> div:first-child').first();
this._$lightboxBodyTwo = this._$lightboxContainer.find('> div:last-child').first();
this._border = this._calculateBorders();
this._padding = this._calculatePadding();
this._galleryName = this._$element.data('gallery');
if (this._galleryName) {
this._$galleryItems = $(document.body).find('*[data-gallery="' + this._galleryName + '"]');
this._galleryIndex = this._$galleryItems.index(this._$element);
$(document).on('keydown.ekkoLightbox', this._navigationalBinder.bind(this));
// add the directional arrows to the modal
if (this._config.showArrows && this._$galleryItems.length > 1) {
this._$lightboxContainer.append('<div class="ekko-lightbox-nav-overlay"><a href="#">' + this._config.leftArrow + '</a><a href="#">' + this._config.rightArrow + '</a></div>');
this._$modalArrows = this._$lightboxContainer.find('div.ekko-lightbox-nav-overlay').first();
this._$lightboxContainer.on('click', 'a:first-child', function (event) {
event.preventDefault();
return _this.navigateLeft();
});
this._$lightboxContainer.on('click', 'a:last-child', function (event) {
event.preventDefault();
return _this.navigateRight();
});
this.updateNavigation();
}
}
this._$modal.on('show.bs.modal', this._config.onShow.bind(this)).on('shown.bs.modal', function () {
_this._toggleLoading(true);
_this._handle();
return _this._config.onShown.call(_this);
}).on('hide.bs.modal', this._config.onHide.bind(this)).on('hidden.bs.modal', function () {
if (_this._galleryName) {
$(document).off('keydown.ekkoLightbox');
$(window).off('resize.ekkoLightbox');
}
_this._$modal.remove();
return _this._config.onHidden.call(_this);
}).modal(this._config);
$(window).on('resize.ekkoLightbox', function () {
_this._resize(_this._wantedWidth, _this._wantedHeight);
});
this._$lightboxContainer.on('touchstart', function () {
_this._touchstartX = event.changedTouches[0].screenX;
}).on('touchend', function () {
_this._touchendX = event.changedTouches[0].screenX;
_this._swipeGesure();
});
}
_createClass(Lightbox, [{
key: 'element',
value: function element() {
return this._$element;
}
}, {
key: 'modal',
value: function modal() {
return this._$modal;
}
}, {
key: 'navigateTo',
value: function navigateTo(index) {
if (index < 0 || index > this._$galleryItems.length - 1) return this;
this._galleryIndex = index;
this.updateNavigation();
this._$element = $(this._$galleryItems.get(this._galleryIndex));
this._handle();
}
}, {
key: 'navigateLeft',
value: function navigateLeft() {
if (!this._$galleryItems) return;
if (this._$galleryItems.length === 1) return;
if (this._galleryIndex === 0) {
if (this._config.wrapping) this._galleryIndex = this._$galleryItems.length - 1;else return;
} else //circular
this._galleryIndex--;
this._config.onNavigate.call(this, 'left', this._galleryIndex);
return this.navigateTo(this._galleryIndex);
}
}, {
key: 'navigateRight',
value: function navigateRight() {
if (!this._$galleryItems) return;
if (this._$galleryItems.length === 1) return;
if (this._galleryIndex === this._$galleryItems.length - 1) {
if (this._config.wrapping) this._galleryIndex = 0;else return;
} else //circular
this._galleryIndex++;
this._config.onNavigate.call(this, 'right', this._galleryIndex);
return this.navigateTo(this._galleryIndex);
}
}, {
key: 'updateNavigation',
value: function updateNavigation() {
if (!this._config.wrapping) {
var $nav = this._$lightboxContainer.find('div.ekko-lightbox-nav-overlay');
if (this._galleryIndex === 0) $nav.find('a:first-child').addClass('disabled');else $nav.find('a:first-child').removeClass('disabled');
if (this._galleryIndex === this._$galleryItems.length - 1) $nav.find('a:last-child').addClass('disabled');else $nav.find('a:last-child').removeClass('disabled');
}
}
}, {
key: 'close',
value: function close() {
return this._$modal.modal('hide');
}
// helper private methods
}, {
key: '_navigationalBinder',
value: function _navigationalBinder(event) {
event = event || window.event;
if (event.keyCode === 39) return this.navigateRight();
if (event.keyCode === 37) return this.navigateLeft();
}
// type detection private methods
}, {
key: '_detectRemoteType',
value: function _detectRemoteType(src, type) {
type = type || false;
if (!type && this._isImage(src)) type = 'image';
if (!type && this._getYoutubeId(src)) type = 'youtube';
if (!type && this._getVimeoId(src)) type = 'vimeo';
if (!type && this._getInstagramId(src)) type = 'instagram';
if (!type || ['image', 'youtube', 'vimeo', 'instagram', 'video', 'url'].indexOf(type) < 0) type = 'url';
return type;
}
}, {
key: '_isImage',
value: function _isImage(string) {
return string && string.match(/(^data:image\/.*,)|(\.(jp(e|g|eg)|gif|png|bmp|webp|svg)((\?|#).*)?$)/i);
}
}, {
key: '_containerToUse',
value: function _containerToUse() {
var _this2 = this;
// if currently showing an image, fade it out and remove
var $toUse = this._$lightboxBodyTwo;
var $current = this._$lightboxBodyOne;
if (this._$lightboxBodyTwo.hasClass('in')) {
$toUse = this._$lightboxBodyOne;
$current = this._$lightboxBodyTwo;
}
$current.removeClass('in show');
setTimeout(function () {
if (!_this2._$lightboxBodyTwo.hasClass('in')) _this2._$lightboxBodyTwo.empty();
if (!_this2._$lightboxBodyOne.hasClass('in')) _this2._$lightboxBodyOne.empty();
}, 500);
$toUse.addClass('in show');
return $toUse;
}
}, {
key: '_handle',
value: function _handle() {
var $toUse = this._containerToUse();
this._updateTitleAndFooter();
var currentRemote = this._$element.attr('data-remote') || this._$element.attr('href');
var currentType = this._detectRemoteType(currentRemote, this._$element.attr('data-type') || false);
if (['image', 'youtube', 'vimeo', 'instagram', 'video', 'url'].indexOf(currentType) < 0) return this._error(this._config.strings.type);
switch (currentType) {
case 'image':
this._preloadImage(currentRemote, $toUse);
this._preloadImageByIndex(this._galleryIndex, 3);
break;
case 'youtube':
this._showYoutubeVideo(currentRemote, $toUse);
break;
case 'vimeo':
this._showVimeoVideo(this._getVimeoId(currentRemote), $toUse);
break;
case 'instagram':
this._showInstagramVideo(this._getInstagramId(currentRemote), $toUse);
break;
case 'video':
this._showHtml5Video(currentRemote, $toUse);
break;
default:
// url
this._loadRemoteContent(currentRemote, $toUse);
break;
}
return this;
}
}, {
key: '_getYoutubeId',
value: function _getYoutubeId(string) {
if (!string) return false;
var matches = string.match(/^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=)([^#\&\?]*).*/);
return matches && matches[2].length === 11 ? matches[2] : false;
}
}, {
key: '_getVimeoId',
value: function _getVimeoId(string) {
return string && string.indexOf('vimeo') > 0 ? string : false;
}
}, {
key: '_getInstagramId',
value: function _getInstagramId(string) {
return string && string.indexOf('instagram') > 0 ? string : false;
}
// layout private methods
}, {
key: '_toggleLoading',
value: function _toggleLoading(show) {
show = show || false;
if (show) {
this._$modalDialog.css('display', 'none');
this._$modal.removeClass('in show');
$('.modal-backdrop').append(this._config.loadingMessage);
} else {
this._$modalDialog.css('display', 'block');
this._$modal.addClass('in show');
$('.modal-backdrop').find('.ekko-lightbox-loader').remove();
}
return this;
}
}, {
key: '_calculateBorders',
value: function _calculateBorders() {
return {
top: this._totalCssByAttribute('border-top-width'),
right: this._totalCssByAttribute('border-right-width'),
bottom: this._totalCssByAttribute('border-bottom-width'),
left: this._totalCssByAttribute('border-left-width')
};
}
}, {
key: '_calculatePadding',
value: function _calculatePadding() {
return {
top: this._totalCssByAttribute('padding-top'),
right: this._totalCssByAttribute('padding-right'),
bottom: this._totalCssByAttribute('padding-bottom'),
left: this._totalCssByAttribute('padding-left')
};
}
}, {
key: '_totalCssByAttribute',
value: function _totalCssByAttribute(attribute) {
return parseInt(this._$modalDialog.css(attribute), 10) + parseInt(this._$modalContent.css(attribute), 10) + parseInt(this._$modalBody.css(attribute), 10);
}
}, {
key: '_updateTitleAndFooter',
value: function _updateTitleAndFooter() {
var title = this._$element.data('title') || "";
var caption = this._$element.data('footer') || "";
this._titleIsShown = false;
if (title || this._config.alwaysShowClose) {
this._titleIsShown = true;
this._$modalHeader.css('display', '').find('.modal-title').html(title || " ");
} else this._$modalHeader.css('display', 'none');
this._footerIsShown = false;
if (caption) {
this._footerIsShown = true;
this._$modalFooter.css('display', '').html(caption);
} else this._$modalFooter.css('display', 'none');
return this;
}
}, {
key: '_showYoutubeVideo',
value: function _showYoutubeVideo(remote, $containerForElement) {
var id = this._getYoutubeId(remote);
var query = remote.indexOf('&') > 0 ? remote.substr(remote.indexOf('&')) : '';
var width = this._$element.data('width') || 560;
var height = this._$element.data('height') || width / (560 / 315);
return this._showVideoIframe('//www.youtube.com/embed/' + id + '?badge=0&autoplay=1&html5=1' + query, width, height, $containerForElement);
}
}, {
key: '_showVimeoVideo',
value: function _showVimeoVideo(id, $containerForElement) {
var width = this._$element.data('width') || 500;
var height = this._$element.data('height') || width / (560 / 315);
return this._showVideoIframe(id + '?autoplay=1', width, height, $containerForElement);
}
}, {
key: '_showInstagramVideo',
value: function _showInstagramVideo(id, $containerForElement) {
// Instagram loads its content into iframes, so this can be put straight into the element
var width = this._$element.data('width') || 612;
var height = width + 80;
id = id.substr(-1) !== '/' ? id + '/' : id; // ensure id has trailing slash
$containerForElement.html('<iframe width="' + width + '" height="' + height + '" src="' + id + 'embed/" frameborder="0" allowfullscreen></iframe>');
this._resize(width, height);
this._config.onContentLoaded.call(this);
if (this._$modalArrows) //hide the arrows when showing video
this._$modalArrows.css('display', 'none');
this._toggleLoading(false);
return this;
}
}, {
key: '_showVideoIframe',
value: function _showVideoIframe(url, width, height, $containerForElement) {
// should be used for videos only. for remote content use loadRemoteContent (data-type=url)
height = height || width; // default to square
$containerForElement.html('<div class="embed-responsive embed-responsive-16by9"><iframe width="' + width + '" height="' + height + '" src="' + url + '" frameborder="0" allowfullscreen class="embed-responsive-item"></iframe></div>');
this._resize(width, height);
this._config.onContentLoaded.call(this);
if (this._$modalArrows) this._$modalArrows.css('display', 'none'); //hide the arrows when showing video
this._toggleLoading(false);
return this;
}
}, {
key: '_showHtml5Video',
value: function _showHtml5Video(url, $containerForElement) {
// should be used for videos only. for remote content use loadRemoteContent (data-type=url)
var width = this._$element.data('width') || 560;
var height = this._$element.data('height') || width / (560 / 315);
$containerForElement.html('<div class="embed-responsive embed-responsive-16by9"><video width="' + width + '" height="' + height + '" src="' + url + '" preload="auto" autoplay controls class="embed-responsive-item"></video></div>');
this._resize(width, height);
this._config.onContentLoaded.call(this);
if (this._$modalArrows) this._$modalArrows.css('display', 'none'); //hide the arrows when showing video
this._toggleLoading(false);
return this;
}
}, {
key: '_loadRemoteContent',
value: function _loadRemoteContent(url, $containerForElement) {
var _this3 = this;
var width = this._$element.data('width') || 560;
var height = this._$element.data('height') || 560;
var disableExternalCheck = this._$element.data('disableExternalCheck') || false;
this._toggleLoading(false);
// external urls are loading into an iframe
// local ajax can be loaded into the container itself
if (!disableExternalCheck && !this._isExternal(url)) {
$containerForElement.load(url, $.proxy(function () {
return _this3._$element.trigger('loaded.bs.modal');
}));
} else {
$containerForElement.html('<iframe src="' + url + '" frameborder="0" allowfullscreen></iframe>');
this._config.onContentLoaded.call(this);
}
if (this._$modalArrows) //hide the arrows when remote content
this._$modalArrows.css('display', 'none');
this._resize(width, height);
return this;
}
}, {
key: '_isExternal',
value: function _isExternal(url) {
var match = url.match(/^([^:\/?#]+:)?(?:\/\/([^\/?#]*))?([^?#]+)?(\?[^#]*)?(#.*)?/);
if (typeof match[1] === "string" && match[1].length > 0 && match[1].toLowerCase() !== location.protocol) return true;
if (typeof match[2] === "string" && match[2].length > 0 && match[2].replace(new RegExp(':(' + ({
"http:": 80,
"https:": 443
})[location.protocol] + ')?$'), "") !== location.host) return true;
return false;
}
}, {
key: '_error',
value: function _error(message) {
console.error(message);
this._containerToUse().html(message);
this._resize(300, 300);
return this;
}
}, {
key: '_preloadImageByIndex',
value: function _preloadImageByIndex(startIndex, numberOfTimes) {
if (!this._$galleryItems) return;
var next = $(this._$galleryItems.get(startIndex), false);
if (typeof next == 'undefined') return;
var src = next.attr('data-remote') || next.attr('href');
if (next.attr('data-type') === 'image' || this._isImage(src)) this._preloadImage(src, false);
if (numberOfTimes > 0) return this._preloadImageByIndex(startIndex + 1, numberOfTimes - 1);
}
}, {
key: '_preloadImage',
value: function _preloadImage(src, $containerForImage) {
var _this4 = this;
$containerForImage = $containerForImage || false;
var img = new Image();
if ($containerForImage) {
(function () {
// if loading takes > 200ms show a loader
var loadingTimeout = setTimeout(function () {
$containerForImage.append(_this4._config.loadingMessage);
}, 200);
img.onload = function () {
if (loadingTimeout) clearTimeout(loadingTimeout);
loadingTimeout = null;
var image = $('<img />');
image.attr('src', img.src);
image.addClass('img-fluid');
// backward compatibility for bootstrap v3
image.css('width', '100%');
$containerForImage.html(image);
if (_this4._$modalArrows) _this4._$modalArrows.css('display', ''); // remove display to default to css property
_this4._resize(img.width, img.height);
_this4._toggleLoading(false);
return _this4._config.onContentLoaded.call(_this4);
};
img.onerror = function () {
_this4._toggleLoading(false);
return _this4._error(_this4._config.strings.fail + (' ' + src));
};
})();
}
img.src = src;
return img;
}
}, {
key: '_swipeGesure',
value: function _swipeGesure() {
if (this._touchendX < this._touchstartX) {
return this.navigateRight();
}
if (this._touchendX > this._touchstartX) {
return this.navigateLeft();
}
}
}, {
key: '_resize',
value: function _resize(width, height) {
height = height || width;
this._wantedWidth = width;
this._wantedHeight = height;
var imageAspecRatio = width / height;
// if width > the available space, scale down the expected width and height
var widthBorderAndPadding = this._padding.left + this._padding.right + this._border.left + this._border.right;
// force 10px margin if window size > 575px
var addMargin = this._config.doc.body.clientWidth > 575 ? 20 : 0;
var discountMargin = this._config.doc.body.clientWidth > 575 ? 0 : 20;
var maxWidth = Math.min(width + widthBorderAndPadding, this._config.doc.body.clientWidth - addMargin, this._config.maxWidth);
if (width + widthBorderAndPadding > maxWidth) {
height = (maxWidth - widthBorderAndPadding - discountMargin) / imageAspecRatio;
width = maxWidth;
} else width = width + widthBorderAndPadding;
var headerHeight = 0,
footerHeight = 0;
// as the resize is performed while the modal is shown, the calculation might fail;
// if so, fall back to the default sizes
if (this._footerIsShown) footerHeight = this._$modalFooter.outerHeight(true) || 55;
if (this._titleIsShown) headerHeight = this._$modalHeader.outerHeight(true) || 67;
var borderPadding = this._padding.top + this._padding.bottom + this._border.bottom + this._border.top;
//calculated each time as resizing the window can cause them to change due to Bootstrap's fluid margins
var margins = parseFloat(this._$modalDialog.css('margin-top')) + parseFloat(this._$modalDialog.css('margin-bottom'));
var maxHeight = Math.min(height, $(window).height() - borderPadding - margins - headerHeight - footerHeight, this._config.maxHeight - borderPadding - headerHeight - footerHeight);
if (height > maxHeight) {
// if height > the available height, scale down the width
width = Math.ceil(maxHeight * imageAspecRatio) + widthBorderAndPadding;
}
this._$lightboxContainer.css('height', maxHeight);
this._$modalDialog.css('flex', 1).css('maxWidth', width);
var modal = this._$modal.data('bs.modal');
if (modal) {
// v4 method is mistakenly protected
try {
modal._handleUpdate();
} catch (Exception) {
modal.handleUpdate();
}
}
return this;
}
}], [{
key: '_jQueryInterface',
value: function _jQueryInterface(config) {
var _this5 = this;
config = config || {};
return this.each(function () {
var $this = $(_this5);
var _config = $.extend({}, Lightbox.Default, $this.data(), typeof config === 'object' && config);
new Lightbox(_this5, _config);
});
}
}]);
return Lightbox;
})();
$.fn[NAME] = Lightbox._jQueryInterface;
$.fn[NAME].Constructor = Lightbox;
$.fn[NAME].noConflict = function () {
$.fn[NAME] = JQUERY_NO_CONFLICT;
return Lightbox._jQueryInterface;
};
return Lightbox;
})(jQuery);
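// Typical wiring sketch (an assumption based on the plugin interface above, not
// shipped with this file): delegate clicks on [data-toggle="lightbox"] elements
// to the jQuery plugin registered as $.fn.ekkoLightbox.
//
// $(document).on('click', '[data-toggle="lightbox"]', function (event) {
//     event.preventDefault();
//     $(this).ekkoLightbox({ alwaysShowClose: true });
// });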
//# sourceMappingURL=ekko-lightbox.js.map
}(jQuery); | PypiClean |
/Dooders-0.0.3.tar.gz/Dooders-0.0.3/dooders/sdk/utils/short_id.py |
"""Concise UUID generation."""
import binascii
import math
import os
import uuid as _uu
from typing import List, Optional
def int_to_string(
number: int, alphabet: List[str], padding: Optional[int] = None
) -> str:
"""
Convert a number to a string, using the given alphabet.
The output has the most significant digit first.
"""
output = ""
alpha_len = len(alphabet)
while number:
number, digit = divmod(number, alpha_len)
output += alphabet[digit]
if padding:
remainder = max(padding - len(output), 0)
output = output + alphabet[0] * remainder
return output[::-1]
def string_to_int(string: str, alphabet: List[str]) -> int:
"""
Convert a string to a number, using the given alphabet.
The input is assumed to have the most significant digit first.
"""
number = 0
alpha_len = len(alphabet)
for char in string:
number = number * alpha_len + alphabet.index(char)
return number
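# Worked example (illustrative only): with a hexadecimal alphabet,
# int_to_string(255, list('0123456789abcdef')) returns 'ff' and
# string_to_int('ff', list('0123456789abcdef')) returns 255, i.e. the two
# functions are inverses for a fixed alphabet.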
class ShortUUID:
def __init__(self, alphabet: Optional[str] = None) -> None:
if alphabet is None:
alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ" "abcdefghijkmnopqrstuvwxyz"
self.set_alphabet(alphabet)
@property
def _length(self) -> int:
"""Return the necessary length to fit the entire UUID given the current alphabet."""
return int(math.ceil(math.log(2**128, self._alpha_len)))
def encode(self, uuid: _uu.UUID, pad_length: Optional[int] = None) -> str:
"""
Encode a UUID into a string (LSB first) according to the alphabet.
If leftmost (MSB) bits are 0, the string might be shorter.
"""
if not isinstance(uuid, _uu.UUID):
raise ValueError("Input `uuid` must be a UUID object.")
if pad_length is None:
pad_length = self._length
return int_to_string(uuid.int, self._alphabet, padding=pad_length)
def decode(self, string: str, legacy: bool = False) -> _uu.UUID:
"""
Decode a string according to the current alphabet into a UUID.
Raises ValueError when encountering illegal characters or a too-long string.
If the string is too short, the leftmost (MSB) bits are filled with 0.
Pass `legacy=True` if your UUID was encoded with a ShortUUID version prior to
1.0.0.
"""
if not isinstance(string, str):
raise ValueError("Input `string` must be a str.")
if legacy:
string = string[::-1]
return _uu.UUID(int=string_to_int(string, self._alphabet))
def uuid(self, name: Optional[str] = None, pad_length: Optional[int] = None) -> str:
"""
Generate and return a UUID.
If the name parameter is provided, set the namespace to the provided
name and generate a UUID.
"""
if pad_length is None:
pad_length = self._length
# If no name is given, generate a random UUID.
if name is None:
u = _uu.uuid4()
elif name.lower().startswith(("http://", "https://")):
u = _uu.uuid5(_uu.NAMESPACE_URL, name)
else:
u = _uu.uuid5(_uu.NAMESPACE_DNS, name)
return self.encode(u, pad_length)
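# Usage sketch (an assumption, mirroring the methods above): a ShortUUID
# instance can round-trip between standard UUIDs and their shortened form.
#
# su = ShortUUID()
# short = su.uuid() # random UUID, padded to su._length characters
# original = _uu.uuid4()
# assert su.decode(su.encode(original)) == original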
def random(self, length: Optional[int] = None) -> str:
"""Generate and return a cryptographically secure short random string of `length`."""
if length is None:
length = self._length
random_num = int(binascii.b2a_hex(os.urandom(length)), 16)
return int_to_string(random_num, self._alphabet, padding=length)[:length]
def get_alphabet(self) -> str:
"""Return the current alphabet used for new UUIDs."""
return "".join(self._alphabet)
def set_alphabet(self, alphabet: str) -> None:
"""Set the alphabet to be used for new UUIDs."""
# Turn the alphabet into a set and sort it to prevent duplicates
# and ensure reproducibility.
new_alphabet = list(sorted(set(alphabet)))
if len(new_alphabet) > 1:
self._alphabet = new_alphabet
self._alpha_len = len(self._alphabet)
else:
raise ValueError(
"Alphabet with more than " "one unique symbols required.")
def encoded_length(self, num_bytes: int = 16) -> int:
"""Return the string length of the shortened UUID."""
factor = math.log(256) / math.log(self._alpha_len)
return int(math.ceil(factor * num_bytes)) | PypiClean |
/FastNLP-1.0.1.tar.gz/FastNLP-1.0.1/fastNLP/io/loader/conll.py | __all__ = [
"ConllLoader",
"Conll2003Loader",
"Conll2003NERLoader",
"OntoNotesNERLoader",
"CTBLoader",
"CNNERLoader",
"MsraNERLoader",
"WeiboNERLoader",
"PeopleDailyNERLoader"
]
import glob
import os
import random
import shutil
import time
from typing import List
from .loader import Loader
from ..file_reader import _read_conll
# from ...core.const import Const
from fastNLP.core.dataset import DataSet, Instance
class ConllLoader(Loader):
r"""
:class:`ConllLoader` reads data in the following format: samples are separated by a blank line, and every other line separates its elements with spaces or tabs. For example::
# file contents
Nadim NNP B-NP B-PER
Ladki NNP I-NP I-PER
AL-AIN NNP B-NP B-LOC
United NNP B-NP B-LOC
Arab NNP I-NP I-LOC
Emirates NNPS I-NP I-LOC
1996-12-06 CD I-NP O
...
# read with the following arguments to get a DataSet with two fields, raw_words and pos, taken from column 0 and column 1 respectively
dataset = ConllLoader(headers=['raw_words', 'pos'], indexes=[0, 1])._load('/path/to/train.conll')
# read with the following arguments to get a DataSet with two fields, raw_words and ner, taken from column 0 and column 3 respectively
dataset = ConllLoader(headers=['raw_words', 'ner'], indexes=[0, 3])._load('/path/to/train.conll')
# read with the following arguments to get a DataSet with three fields: raw_words, pos and ner
dataset = ConllLoader(headers=['raw_words', 'pos', 'ner'], indexes=[0, 1, 3])._load('/path/to/train.conll')
The `field` s of the :class:`~fastNLP.core.DataSet` returned by :class:`ConllLoader` are determined by the ``headers`` passed in.
:param headers: name of each data column; ``headers`` and ``indexes`` correspond one-to-one
:param sep: the separator to use; defaults to the tab character
:param indexes: indices of the data columns to keep, starting from **0**. If ``None``, all columns are kept.
:param dropna: whether to skip invalid data. If ``False``, a :class:`ValueError` is raised when invalid data is encountered.
:param drophash: whether to ignore sentences starting with ``#``.
"""
def __init__(self, headers: List[str], sep: str=None, indexes: List[int]=None, dropna: bool=True, drophash: bool=True):
super(ConllLoader, self).__init__()
if not isinstance(headers, (list, tuple)):
raise TypeError(
'invalid headers: {}, should be list of strings'.format(headers))
self.headers = headers
self.dropna = dropna
self.drophash = drophash
self.sep=sep
if indexes is None:
self.indexes = list(range(len(self.headers)))
else:
if len(indexes) != len(headers):
raise ValueError
self.indexes = indexes
def _load(self, path):
r"""
Read the file at the given path into a DataSet; the fields are determined by the headers specified when this ConllLoader was initialized.
:param str path: path to the file
:return: DataSet
"""
ds = DataSet()
for idx, data in _read_conll(path,sep=self.sep, indexes=self.indexes, dropna=self.dropna,
drophash=self.drophash):
ins = {h: data[i] for i, h in enumerate(self.headers)}
ds.append(Instance(**ins))
return ds
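# Usage sketch (the path is a placeholder): a comma-separated variant of the
# format can be read by passing `sep`, with malformed lines skipped via `dropna`.
#
# loader = ConllLoader(headers=['raw_words', 'pos', 'ner'], indexes=[0, 1, 3], sep=',', dropna=True)
# ds = loader._load('/path/to/train.conll')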
class Conll2003Loader(ConllLoader):
r"""
Loader for the **conll2003** task data. The content should look like the following: the first column is **raw_words**, the second is **pos**,
the third is **chunking**, and the fourth is **ner**.
Lines starting with ``"-DOCSTART-"`` are ignored, because that token is used as a document separator in **conll2003**.
Example::
Nadim NNP B-NP B-PER
Ladki NNP I-NP I-PER
AL-AIN NNP B-NP B-LOC
United NNP B-NP B-LOC
Arab NNP I-NP I-LOC
Emirates NNPS I-NP I-LOC
1996-12-06 CD I-NP O
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table:: Structure of the data after loading with Conll2003Loader.
:header: "raw_words", "pos", "chunk", "ner"
"[Nadim, Ladki]", "[NNP, NNP]", "[B-NP, I-NP]", "[B-PER, I-PER]"
"[AL-AIN, United, Arab, ...]", "[NNP, NNP, NNP, ...]", "[B-NP, B-NP, I-NP, ...]", "[B-LOC, B-LOC, I-LOC, ...]"
"[...]", "[...]", "[...]", "[...]"
"""
def __init__(self):
headers = [
'raw_words', 'pos', 'chunk', 'ner',
]
super(Conll2003Loader, self).__init__(headers=headers)
def _load(self, path):
r"""
Read the file at the given path into a DataSet; the fields are determined by the headers specified when this ConllLoader was initialized.
:param str path: path to the file
:return: DataSet
"""
ds = DataSet()
for idx, data in _read_conll(path, indexes=self.indexes, dropna=self.dropna):
doc_start = False
for i, h in enumerate(self.headers):
field = data[i]
if str(field[0]).startswith('-DOCSTART-'):
doc_start = True
break
if doc_start:
continue
ins = {h: data[i] for i, h in enumerate(self.headers)}
ds.append(Instance(**ins))
return ds
def download(self, output_dir=None):
raise RuntimeError("conll2003 cannot be downloaded automatically.")
class Conll2003NERLoader(ConllLoader):
r"""
Loader for the NER data of the **conll2003** task. Each line has 4 columns, and a blank line separates two sentences.
The supported content looks like::
Nadim NNP B-NP B-PER
Ladki NNP I-NP I-PER
AL-AIN NNP B-NP B-LOC
United NNP B-NP B-LOC
Arab NNP I-NP I-LOC
Emirates NNPS I-NP I-LOC
1996-12-06 CD I-NP O
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table:: Structure of the data after loading with Conll2003NERLoader; target uses the BIO2 encoding
:header: "raw_words", "target"
"[Nadim, Ladki]", "[B-PER, I-PER]"
"[AL-AIN, United, Arab, ...]", "[B-LOC, B-LOC, I-LOC, ...]"
"[...]", "[...]"
"""
def __init__(self):
headers = [
'raw_words', 'target',
]
super().__init__(headers=headers, indexes=[0, 3])
def _load(self, path):
r"""
Read the file at the given path into a DataSet; the fields are determined by the headers specified when this ConllLoader was initialized.
:param str path: path to the file
:return: DataSet
"""
ds = DataSet()
for idx, data in _read_conll(path, indexes=self.indexes, dropna=self.dropna):
doc_start = False
for i, h in enumerate(self.headers):
field = data[i]
if str(field[0]).startswith('-DOCSTART-'):
doc_start = True
break
if doc_start:
continue
ins = {h: data[i] for i, h in enumerate(self.headers)}
ds.append(Instance(**ins))
if len(ds) == 0:
raise RuntimeError("No data found {}.".format(path))
return ds
def download(self):
raise RuntimeError("conll2003 cannot be downloaded automatically.")
class OntoNotesNERLoader(ConllLoader):
r"""
Loader for the **OntoNotes** NER data, which is also the NER task data of **Conll2012**. For converting **OntoNotes** data into the conll format, see
https://github.com/yhcc/OntoNotes-5.0-NER. :class:`OntoNotesNERLoader` takes the contents of column **4** and column **11**.
The data format read in is::
bc/msnbc/00/msnbc_0000 0 0 Hi UH (TOP(FRAG(INTJ*) - - - Dan_Abrams * -
bc/msnbc/00/msnbc_0000 0 1 everyone NN (NP*) - - - Dan_Abrams * -
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table::
:header: "raw_words", "target"
"['Hi', 'everyone', '.']", "['O', 'O', 'O']"
"['first', 'up', 'on', 'the', 'docket']", "['O', 'O', 'O', 'O', 'O']"
"[...]", "[...]"
"""
def __init__(self):
super().__init__(headers=['raw_words', 'target'], indexes=[3, 10])
def _load(self, path: str):
dataset = super()._load(path)
def convert_to_bio(tags):
bio_tags = []
flag = None
for tag in tags:
label = tag.strip("()*")
if '(' in tag:
bio_label = 'B-' + label
flag = label
elif flag:
bio_label = 'I-' + flag
else:
bio_label = 'O'
if ')' in tag:
flag = None
bio_tags.append(bio_label)
return bio_tags
def convert_word(words):
converted_words = []
for word in words:
word = word.replace('/.', '.') # some trailing periods appear in the /. form
if not word.startswith('-'):
converted_words.append(word)
continue
# the following symbols were escaped in the data; convert them back
tfrs = {'-LRB-': '(',
'-RRB-': ')',
'-LSB-': '[',
'-RSB-': ']',
'-LCB-': '{',
'-RCB-': '}'
}
if word in tfrs:
converted_words.append(tfrs[word])
else:
converted_words.append(word)
return converted_words
dataset.apply_field(convert_word, field_name='raw_words', new_field_name='raw_words')
dataset.apply_field(convert_to_bio, field_name='target', new_field_name='target')
return dataset
def download(self):
raise RuntimeError("Ontonotes cannot be downloaded automatically, you can refer "
"https://github.com/yhcc/OntoNotes-5.0-NER to download and preprocess.")
class CTBLoader(Loader):
r"""
**Loader** for the **CTB** dataset. The supported data should have the following format, where the second column is the **word**, the fourth column is the **pos tag**, the seventh column is the **head** of the dependency tree,
and the eighth column is the **label** of the dependency tree.
Example::
1 印度 _ NR NR _ 3 nn _ _
2 海军 _ NN NN _ 3 nn _ _
3 参谋长 _ NN NN _ 5 nsubjpass _ _
4 被 _ SB SB _ 5 pass _ _
5 解职 _ VV VV _ 0 root _ _
1 新华社 _ NR NR _ 7 dep _ _
2 新德里 _ NR NR _ 7 dep _ _
3 12月 _ NT NT _ 7 dep _ _
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table::
:header: "raw_words", "pos", "dep_head", "dep_label"
"[印度, 海军, ...]", "[NR, NN, SB, ...]", "[3, 3, ...]", "[nn, nn, ...]"
"[新华社, 新德里, ...]", "[NR, NR, NT, ...]", "[7, 7, 7, ...]", "[dep, dep, dep, ...]"
"[...]", "[...]", "[...]", "[...]"
"""
def __init__(self):
super().__init__()
headers = [
'raw_words', 'pos', 'dep_head', 'dep_label',
]
indexes = [
1, 3, 6, 7,
]
self.loader = ConllLoader(headers=headers, indexes=indexes)
def _load(self, path: str):
dataset = self.loader._load(path)
return dataset
def download(self):
r"""
Due to copyright restrictions, automatic download is not available. See
https://catalog.ldc.upenn.edu/LDC2013T21
"""
raise RuntimeError("CTB cannot be downloaded automatically.")
class CNNERLoader(Loader):
r"""
Supports loading content in the following format: two columns per line separated by a space, with a blank line separating two samples
Example::
我 O
们 O
变 O
而 O
以 O
书 O
会 O
...
"""
def _load(self, path: str):
"""
:param path: path to the file
:return: :class:`~fastNLP.core.DataSet` containing the ``raw_chars`` and ``target`` columns
"""
ds = DataSet()
with open(path, 'r', encoding='utf-8') as f:
raw_chars = []
target = []
for line in f:
line = line.strip()
if line:
parts = line.split()
if len(parts) == 1: # some lines in data downloaded online are missing the tag column; default to O
parts.append('O')
raw_chars.append(parts[0])
target.append(parts[1])
else:
if raw_chars:
ds.append(Instance(raw_chars=raw_chars, target=target))
raw_chars = []
target = []
return ds
class MsraNERLoader(CNNERLoader):
r"""
Loader for the **MSRA-NER** data. If you use this data, please cite the following paper:
Gina-Anne Levow, 2006, The Third International Chinese Language Processing Bakeoff: Word Segmentation and Named Entity Recognition.
The data format should look like the following::
把 O
欧 B-LOC
美 B-LOC
、 O
港 B-LOC
台 B-LOC
流 O
行 O
的 O
食 O
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table::
:header: "raw_chars", "target"
"['把', '欧'] ", "['O', 'B-LOC']"
"['美', '、']", "['B-LOC', 'O']"
"[...]", "[...]"
"""
def __init__(self):
super().__init__()
def download(self, dev_ratio: float = 0.1, re_download: bool = False) -> str:
r"""
Automatically download the **MSRA-NER** data.
After downloading, ``output_dir`` contains three files: ``train.conll``, ``test.conll`` and ``dev.conll``.
If ``dev_ratio`` is 0, only ``train.conll`` and ``test.conll`` are present.
:param dev_ratio: if the path contains no validation set, the fraction of train to split off as dev. If **0**, no dev split is made.
:param re_download: whether to re-download the data, so that it can be re-split.
:return: directory of the dataset
"""
dataset_name = 'msra-ner'
data_dir = self._get_dataset_path(dataset_name=dataset_name)
modify_time = 0
for filepath in glob.glob(os.path.join(data_dir, '*')):
modify_time = os.stat(filepath).st_mtime
break
if time.time() - modify_time > 1 and re_download: # a rather ugly way of checking whether the file was only just downloaded
shutil.rmtree(data_dir)
data_dir = self._get_dataset_path(dataset_name=dataset_name)
if not os.path.exists(os.path.join(data_dir, 'dev.conll')):
if dev_ratio > 0:
assert 0 < dev_ratio < 1, "dev_ratio should be in range (0,1)."
try:
with open(os.path.join(data_dir, 'train.conll'), 'r', encoding='utf-8') as f, \
open(os.path.join(data_dir, 'middle_file.conll'), 'w', encoding='utf-8') as f1, \
open(os.path.join(data_dir, 'dev.conll'), 'w', encoding='utf-8') as f2:
lines = [] # one sample spans many lines
for line in f:
line = line.strip()
if line:
lines.append(line)
else:
if random.random() < dev_ratio:
f2.write('\n'.join(lines) + '\n\n')
else:
f1.write('\n'.join(lines) + '\n\n')
lines.clear()
os.remove(os.path.join(data_dir, 'train.conll'))
os.renames(os.path.join(data_dir, 'middle_file.conll'), os.path.join(data_dir, 'train.conll'))
finally:
if os.path.exists(os.path.join(data_dir, 'middle_file.conll')):
os.remove(os.path.join(data_dir, 'middle_file.conll'))
return data_dir
class WeiboNERLoader(CNNERLoader):
r"""
Loader for the **WeiboNER** data. If you use this data, please cite the following paper:
Nanyun Peng and Mark Dredze, 2015, Named Entity Recognition for Chinese Social Media with Jointly Trained Embeddings.
The data format should look like the following::
老 B-PER.NOM
百 I-PER.NOM
姓 I-PER.NOM
心 O
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table::
:header: "raw_chars", "target"
"['老', '百', '姓']", "['B-PER.NOM', 'I-PER.NOM', 'I-PER.NOM']"
"['心']", "['O']"
"[...]", "[...]"
"""
def __init__(self):
super().__init__()
def download(self) -> str:
r"""
Automatically download the **Weibo-NER** data.
:return: directory of the dataset
"""
dataset_name = 'weibo-ner'
data_dir = self._get_dataset_path(dataset_name=dataset_name)
return data_dir
class PeopleDailyNERLoader(CNNERLoader):
r"""
**Loader** for the **People's Daily NER** dataset. The supported data format is as follows::
中 B-ORG
共 I-ORG
中 I-ORG
央 I-ORG
致 O
中 B-ORG
...
The :class:`~fastNLP.core.DataSet` read in has the following structure:
.. csv-table:: the target column uses the BIO encoding
:header: "raw_chars", "target"
"['中', '共', '中', '央']", "['B-ORG', 'I-ORG', 'I-ORG', 'I-ORG']"
"[...]", "[...]"
"""
def __init__(self):
super().__init__()
def download(self) -> str:
"""
Automatically download the dataset.
:return: directory of the dataset
"""
dataset_name = 'peopledaily'
data_dir = self._get_dataset_path(dataset_name=dataset_name)
return data_dir | PypiClean |
/BOHB_HPO-0.5.2.tar.gz/BOHB_HPO-0.5.2/README.md | Bayesian Optimization Hyperband Hyperparameter Optimization
===========================================================
Implementation for [BOHB](http://proceedings.mlr.press/v80/falkner18a.html)
## Requirements
- numpy
- scipy
- statsmodels
- dask
- torch (example)
## Installation
```bash
pip3 install bohb-hpo
```
## Usage
``` Python
from bohb import BOHB
import bohb.configspace as cs
def objective(step, alpha, beta):
return 1 / (alpha * step + 0.1) + beta
def evaluate(params, n_iterations):
loss = 0.0
for i in range(int(n_iterations)):
loss += objective(**params, step=i)
return loss/n_iterations
if __name__ == '__main__':
alpha = cs.CategoricalHyperparameter('alpha', [0.001, 0.01, 0.1])
beta = cs.CategoricalHyperparameter('beta', [1, 2, 3])
configspace = cs.ConfigurationSpace([alpha, beta])
opt = BOHB(configspace, evaluate, max_budget=10, min_budget=1)
# Parallel
# opt = BOHB(configspace, evaluate, max_budget=10, min_budget=1, n_proc=4)
logs = opt.optimize()
```
See [examples](https://github.com/goktug97/bohb-hpo/tree/master/examples)
### Configspace Examples
- Basic
```python
import bohb.configspace as cs
lr = cs.UniformHyperparameter('lr', 1e-4, 1e-1, log=True)
batch_size = cs.CategoricalHyperparameter('batch_size', [8, 16, 32])
configspace = cs.ConfigurationSpace([lr, batch_size], seed=123)
```
- Conditional Parameters
```python
import bohb.configspace as cs
a = cs.IntegerUniformHyperparameter('a', 0, 4)
b = cs.CategoricalHyperparameter('b', ['a', 'b', 'c'], a == 0)
b_default = cs.CategoricalHyperparameter('b', ['d'], ~b.cond)
configspace = cs.ConfigurationSpace([a, b, b_default], seed=123)
```
- Complex Conditional Parameters
```python
import bohb.configspace as cs
a = cs.IntegerUniformHyperparameter('a', 0, 4)
b1 = cs.UniformHyperparameter('b', 0, 0.5, a <= 1)
b2 = cs.UniformHyperparameter('b', 0.5, 1, ~b1.cond)
c1 = cs.CategoricalHyperparameter('c', ['a', 'b', 'c'], b1 < 0.25)
c2 = cs.CategoricalHyperparameter('c', ['c', 'd', 'e'], ~c1.cond)
d1 = cs.UniformHyperparameter('d', 0, 1, (b1 < 0.125) & (c1 == 'b'))
d2 = cs.NormalHyperparameter('d', 0, 0.1, (b1 > 0.125) & (c1 == 'c'))
d3 = cs.IntegerNormalHyperparameter('d', 5, 10, (b2 > 0.750) & (c2 == 'd'))
d4 = cs.UniformHyperparameter('d', 0, 0, ~(d1.cond | d2.cond | d3.cond))
configspace = cs.ConfigurationSpace([a, b1, b2, c1, c2, d1, d2, d3, d4], seed=123)
```
## License
bohb-hpo is licensed under the MIT License.
| PypiClean |
/AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/toolbox/interface_windows_tools.py |
import os
try:
import win32gui
except Exception as ex:
print('Cant import win32gui (probably CI build on linux)' + str(ex))
try:
import win32con
except Exception as ex:
print('Cant import win32con (probably CI build on linux)' + str(ex))
try:
import win32api
except Exception as ex:
print('Cant import win32api (probably CI build on linux)' + str(ex))
try:
import win32com.client
except Exception as ex:
print('Cant import win32com.client (probably CI build on linux)' + str(ex))
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + "..")
print(root_folder)
def get_window_by_caption(caption):
"""
finds the window by caption and returns handle (int)
"""
try:
hwnd = win32gui.FindWindow(None, caption)
return hwnd
except Exception as ex:
print('error calling win32gui.FindWindow ' + str(ex))
return -1
def send_text(hwnd, txt):
"""
sends the text 'txt' to the window handle hwnd using SendMessage
"""
try:
for c in txt:
if c == '\n':
win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)
else:
win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0)
except Exception as ex:
print('error calling SendMessage ' + str(ex))
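# Example sketch (the caption string is an assumption): locate a window by its
# caption and send characters to it. Note that many applications only process
# WM_CHAR on a child edit control, not the top-level window.
#
# hwnd = get_window_by_caption('Untitled - Notepad')
# if hwnd:
# send_text(hwnd, 'hello from aikif\n')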
def launch_app(app_path, params=[], time_before_kill_app=15):
"""
start an app
"""
import subprocess
try:
res = subprocess.call([app_path] + params, timeout=time_before_kill_app, shell=True)
print('res = ', res)
if res == 0:
return True
else:
return False
except Exception as ex:
print('error launching app ' + str(app_path) + ' with params ' + str(params) + '\n' + str(ex))
return False
def app_activate(caption):
"""
use shell to bring the application with caption to front
"""
try:
shell = win32com.client.Dispatch("WScript.Shell")
shell.AppActivate(caption)
except Exception as ex:
print('error calling win32com.client.Dispatch (AppActivate)')
def close_app(caption):
"""
close an app
"""
pass
def send_keys(key_string):
"""
sends the text or keys to the active application using shell
Note that importing win32com may show an imp-module deprecation warning.
Examples:
shell.SendKeys("^a") # CTRL+A
shell.SendKeys("{DELETE}") # Delete key
shell.SendKeys("hello this is a lot of text with a //")
"""
try:
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys(key_string)
except Exception as ex:
print('error calling win32com.client.Dispatch (SendKeys)') | PypiClean |
/Flashcards%20Terminal%20App-0.0.1.tar.gz/Flashcards Terminal App-0.0.1/flashcardscode/main.py | import accountlogin
import studyoptions
import setcreation as sc
import deletion
import config as cg
def menu_input():
option = -1
while True:
option = input('\nEnter 1-7: ')
try:
if 1 <= int(option) <= 7:
break
else:
print('Error: Please enter a number 1-7')
except:
print('Error: Please enter a number 1-7')
return option
def main():
print('\n---------------------------------------------- Welcome to the Flashcards Study App ----------------------------------------------')
print('This app allows you to create study sets in app and import study sets from both .CSV files and Quizlet sets.')
print('You also have multiple study options: Flashcards and Self-Testing. Within both options, you can choose if flashcards are ')
print('presented in their inputted order or a random order. You also have a choice to see the term or definition first.')
print('To use this app, you will need to have an account. You can also delete your study sets or your account at any time.')
input('To find your created study sets, locate them in your documents folder. Press enter when you are ready to create/log into an account.')
#Logs in current user - creates account if necessary.
accountlogin.main()
run_loop = True
while run_loop:
studysetCheck = sc.study_set_check('current')
print('\nYou have seven possible actions:')
print('1. Create and save a flashcard set')
print('2. Import a Quizlet')
print('3. Import a CSV file located in your desktop')
print('4. Study from a created set')
print('5. Delete a Study Set')
print('6. Delete your account')
print('7. End the program')
option = int(menu_input())
if option == 1:
studysetCheck = True
sc.create_and_save_studyset()
elif option == 2:
studysetCheck = True
sc.import_quizlet()
elif option == 3:
studysetCheck = True
sc.import_csv()
elif option == 4:
if studysetCheck == False:
print('\nYou cannot study from a study set since you do not have any. Please create or import a study set.\n')
else:
sc.study_set_check('current')
studyoptions.main()
elif option == 5:
if studysetCheck != False:
studysetCheck = sc.study_set_check('current')
while (studysetCheck != False):
studysetCheck = deletion.set_deletion()
break
else:
print('\nYou cannot delete any study sets as you do not have any. Please select a different option.\n')
elif option == 6:
if cg.error_control('yes', 'no', '\nAre you sure you want to delete your account? You cannot reverse this. Enter Yes or No. ') == True:
deletion.user_deletion()
print('\nPROGRAM ENDED\n')
return
elif option == 7:
run_loop = False
print('\nPROGRAM ENDED\n')
main() | PypiClean |
/GeneLearn-0.0.0.tar.gz/GeneLearn-0.0.0/docs/_build/html/_static/language_data.js | var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
/* Non-minified version JS is _stemmer.js if file is provided */
/**
* Porter Stemmer
*/
var Stemmer = function() {
var step2list = {
ational: 'ate',
tional: 'tion',
enci: 'ence',
anci: 'ance',
izer: 'ize',
bli: 'ble',
alli: 'al',
entli: 'ent',
eli: 'e',
ousli: 'ous',
ization: 'ize',
ation: 'ate',
ator: 'ate',
alism: 'al',
iveness: 'ive',
fulness: 'ful',
ousness: 'ous',
aliti: 'al',
iviti: 'ive',
biliti: 'ble',
logi: 'log'
};
var step3list = {
icate: 'ic',
ative: '',
alize: 'al',
iciti: 'ic',
ical: 'ic',
ful: '',
ness: ''
};
var c = "[^aeiou]"; // consonant
var v = "[aeiouy]"; // vowel
var C = c + "[^aeiouy]*"; // consonant sequence
var V = v + "[aeiou]*"; // vowel sequence
var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
var s_v = "^(" + C + ")?" + v; // vowel in stem
this.stemWord = function (w) {
var stem;
var suffix;
var firstch;
var origword = w;
if (w.length < 3)
return w;
var re;
var re2;
var re3;
var re4;
firstch = w.substr(0,1);
if (firstch == "y")
w = firstch.toUpperCase() + w.substr(1);
// Step 1a
re = /^(.+?)(ss|i)es$/;
re2 = /^(.+?)([^s])s$/;
if (re.test(w))
w = w.replace(re,"$1$2");
else if (re2.test(w))
w = w.replace(re2,"$1$2");
// Step 1b
re = /^(.+?)eed$/;
re2 = /^(.+?)(ed|ing)$/;
if (re.test(w)) {
var fp = re.exec(w);
re = new RegExp(mgr0);
if (re.test(fp[1])) {
re = /.$/;
w = w.replace(re,"");
}
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1];
re2 = new RegExp(s_v);
if (re2.test(stem)) {
w = stem;
re2 = /(at|bl|iz)$/;
re3 = new RegExp("([^aeiouylsz])\\1$");
re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re2.test(w))
w = w + "e";
else if (re3.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
else if (re4.test(w))
w = w + "e";
}
}
// Step 1c
re = /^(.+?)y$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(s_v);
if (re.test(stem))
w = stem + "i";
}
// Step 2
re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step2list[suffix];
}
// Step 3
re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
suffix = fp[2];
re = new RegExp(mgr0);
if (re.test(stem))
w = stem + step3list[suffix];
}
// Step 4
re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
re2 = /^(.+?)(s|t)(ion)$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
if (re.test(stem))
w = stem;
}
else if (re2.test(w)) {
var fp = re2.exec(w);
stem = fp[1] + fp[2];
re2 = new RegExp(mgr1);
if (re2.test(stem))
w = stem;
}
// Step 5
re = /^(.+?)e$/;
if (re.test(w)) {
var fp = re.exec(w);
stem = fp[1];
re = new RegExp(mgr1);
re2 = new RegExp(meq1);
re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
w = stem;
}
re = /ll$/;
re2 = new RegExp(mgr1);
if (re.test(w) && re2.test(w)) {
re = /.$/;
w = w.replace(re,"");
}
// and turn initial Y back to y
if (firstch == "y")
w = firstch.toLowerCase() + w.substr(1);
return w;
}
}
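// Illustrative example (not part of the generated file): the stemmer maps
// inflected forms onto a common stem, e.g.
//   var stemmer = new Stemmer();
//   stemmer.stemWord("running");    // -> "run"
//   stemmer.stemWord("relational"); // -> "relat"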
var splitChars = (function() {
var result = {};
var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
var i, j, start, end;
for (i = 0; i < singles.length; i++) {
result[singles[i]] = true;
}
var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
[722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
[1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
[1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
[1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
[2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
[2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
[2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
[2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
[2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
[2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
[2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
[3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
[3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
[3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
[3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
[3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
[3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
[4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
[4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
[4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
[5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
[6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
[6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
[6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
[6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
[7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
[7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
[8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
[8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
[8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
[10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
[11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
[12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
[12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
[12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
[19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
[42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
[42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
[43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
[43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
[43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
[43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
[44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
[57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
[64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
[65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
[65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
for (i = 0; i < ranges.length; i++) {
start = ranges[i][0];
end = ranges[i][1];
for (j = start; j <= end; j++) {
result[j] = true;
}
}
return result;
})();
function splitQuery(query) {
var result = [];
var start = -1;
for (var i = 0; i < query.length; i++) {
if (splitChars[query.charCodeAt(i)]) {
if (start !== -1) {
result.push(query.slice(start, i));
start = -1;
}
} else if (start === -1) {
start = i;
}
}
if (start !== -1) {
result.push(query.slice(start));
}
return result;
} | PypiClean |
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/numpy/rotations.py | from aerosandbox.numpy import sin, cos, linalg
from aerosandbox.numpy.array import array
import numpy as _onp
from typing import Union, List
def rotation_matrix_2D(
angle,
as_array: bool = True,
):
"""
Gives the 2D rotation matrix associated with a counterclockwise rotation about an angle.
Args:
angle: Angle by which to rotate. Given in radians.
as_array: Determines whether to return an array-like or just a simple list of lists.
Returns: The 2D rotation matrix
"""
s = sin(angle)
c = cos(angle)
rot = [
[c, -s],
[s, c]
]
if as_array:
return array(rot)
else:
return rot
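# Illustrative usage sketch (added for clarity; not part of the original module).
# A 90-degree counterclockwise rotation should map the x unit vector onto the
# y unit vector; the commented check below uses plain NumPy and the function above.
#
#     import numpy as np
#     R = rotation_matrix_2D(np.pi / 2)
#     np.allclose(R @ np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> True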
def rotation_matrix_3D(
angle: Union[float, _onp.ndarray],
axis: Union[_onp.ndarray, List, str],
as_array: bool = True,
axis_already_normalized: bool = False
):
"""
Yields the rotation matrix that corresponds to a rotation by a specified amount about a given axis.
An implementation of https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
Args:
angle: The angle to rotate by. [radians]
Direction of rotation corresponds to the right-hand rule.
Can be vectorized.
axis: The axis to rotate about. [ndarray]
Can be vectorized; be sure axis[0] yields all the x-components, etc.
as_array: boolean, returns a 3x3 array-like if True, and a list-of-lists otherwise.
If you are intending to use this function vectorized, it is recommended you flag this False. (Or test before
proceeding.)
axis_already_normalized: boolean, skips axis normalization for speed if you flag this true.
Returns:
The rotation matrix, with type according to the parameter `as_array`.
"""
s = sin(angle)
c = cos(angle)
if isinstance(axis, str):
if axis.lower() == "x":
rot = [
[1, 0, 0],
[0, c, -s],
[0, s, c]
]
elif axis.lower() == "y":
rot = [
[c, 0, s],
[0, 1, 0],
[-s, 0, c]
]
elif axis.lower() == "z":
rot = [
[c, -s, 0],
[s, c, 0],
[0, 0, 1]
]
else:
raise ValueError("If `axis` is a string, it must be `x`, `y`, or `z`.")
else:
ux = axis[0]
uy = axis[1]
uz = axis[2]
if not axis_already_normalized:
norm = (ux ** 2 + uy ** 2 + uz ** 2) ** 0.5
ux = ux / norm
uy = uy / norm
uz = uz / norm
rot = [
[c + ux ** 2 * (1 - c), ux * uy * (1 - c) - uz * s, ux * uz * (1 - c) + uy * s],
[uy * ux * (1 - c) + uz * s, c + uy ** 2 * (1 - c), uy * uz * (1 - c) - ux * s],
[uz * ux * (1 - c) - uy * s, uz * uy * (1 - c) + ux * s, c + uz ** 2 * (1 - c)]
]
if as_array:
return array(rot)
else:
return rot
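# Illustrative usage sketch (added for clarity; not part of the original module).
# Rotating the x unit vector by 90 degrees about the z-axis yields the y unit
# vector, and the string form of `axis` agrees with the vector form.
#
#     import numpy as np
#     Rz = rotation_matrix_3D(np.pi / 2, axis="z")
#     np.allclose(Rz @ np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))  # -> True
#     np.allclose(Rz, rotation_matrix_3D(np.pi / 2, axis=[0, 0, 1]))          # -> True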
def rotation_matrix_from_euler_angles(
roll_angle: Union[float, _onp.ndarray] = 0,
pitch_angle: Union[float, _onp.ndarray] = 0,
yaw_angle: Union[float, _onp.ndarray] = 0,
as_array: bool = True
):
"""
Yields the rotation matrix that corresponds to a given Euler angle rotation.
Note: This uses the standard (yaw, pitch, roll) Euler angle rotation, where:
* First, a rotation about x is applied (roll)
* Second, a rotation about y is applied (pitch)
* Third, a rotation about z is applied (yaw)
In other words: R = R_z(yaw) @ R_y(pitch) @ R_x(roll).
Note: To use this, pre-multiply your vector to go from body axes to earth axes.
Example:
>>> vector_earth = rotation_matrix_from_euler_angles(np.pi / 4, np.pi / 4, np.pi / 4) @ vector_body
See notes:
http://planning.cs.uiuc.edu/node102.html
Args:
roll_angle: The roll angle, which is a rotation about the x-axis. [radians]
pitch_angle: The pitch angle, which is a rotation about the y-axis. [radians]
yaw_angle: The yaw angle, which is a rotation about the z-axis. [radians]
as_array:
Returns:
"""
sa = sin(yaw_angle)
ca = cos(yaw_angle)
sb = sin(pitch_angle)
cb = cos(pitch_angle)
sc = sin(roll_angle)
cc = cos(roll_angle)
rot = [
[ca * cb, ca * sb * sc - sa * cc, ca * sb * cc + sa * sc],
[sa * cb, sa * sb * sc + ca * cc, sa * sb * cc - ca * sc],
[-sb, cb * sc, cb * cc]
]
if as_array:
return array(rot)
else:
return rot
def is_valid_rotation_matrix(
a: _onp.ndarray,
tol=1e-9
) -> bool:
"""
Returns a boolean of whether the given matrix satisfies the properties of a rotation matrix.
Specifically, tests for:
* Volume-preserving
* Handedness of output reference frame
* Orthogonality of output reference frame
Args:
a: The array-like to be tested
tol: A tolerance to use for truthiness; accounts for floating-point error.
Returns: A boolean of whether the array-like is a valid rotation matrix.
"""
def approx_equal(x, y):
return (x > y - tol) and (x < y + tol)
det = linalg.det(a)
is_volume_preserving_and_right_handed = approx_equal(det, 1)
eye_approx = a.T @ a
eye = _onp.eye(a.shape[0])
is_orthogonality_preserving = True
for i in range(eye.shape[0]):
for j in range(eye.shape[1]):
if not approx_equal(eye_approx[i, j], eye[i, j]):
is_orthogonality_preserving = False
return (
is_volume_preserving_and_right_handed and
is_orthogonality_preserving
) | PypiClean |
/MR-OCTOPUS-0.2.7.tar.gz/MR-OCTOPUS-0.2.7/OCTOPUS/recon/imtransforms.py | import numpy.fft as npfft
import numpy as np
import pynufft
from math import pi
def im2ksp(M, cartesian_opt, NufftObj=None, params=None):
'''Image to k-space transformation
Parameters
----------
M : numpy.ndarray
Image data
cartesian_opt : int
Cartesian = 1, Non-Cartesian = 0.
NufftObj : pynufft.linalg.nufft_cpu.NUFFT_cpu
Non-uniform FFT Object for non-cartesian transformation. Default is None.
params : dict
Sequence parameters. Default is None.
Returns
-------
kspace : numpy.ndarray
k-space data
'''
if cartesian_opt == 1:
kspace = npfft.fftshift(npfft.fft2(M))
elif cartesian_opt == 0:
# Sample phantom along ktraj
if 'Npoints' not in params:
raise ValueError('The number of acquisition points is missing')
if 'Nshots' not in params:
raise ValueError('The number of shots is missing')
kspace = NufftObj.forward(M).reshape((params['Npoints'], params['Nshots'])) # sampled image
else:
raise ValueError('Cartesian option should be either 0 or 1')
return kspace
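# Illustrative usage sketch (added for clarity; not part of the original module).
# For the Cartesian case no NUFFT object or parameter dict is needed; the image
# size below is an assumption made only for the example.
#
#     import numpy as np
#     M = np.random.rand(64, 64)           # hypothetical 64 x 64 phantom image
#     ksp = im2ksp(M, cartesian_opt=1)     # centered 2D FFT of the image
#     ksp.shape                            # -> (64, 64)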
def ksp2im(ksp, cartesian_opt, NufftObj=None, params=None):
'''K-space to image transformation
Parameters
----------
ksp : numpy.ndarray
K-space data
cartesian_opt : int
Cartesian = 1, Non-Cartesian = 0.
NufftObj : pynufft.linalg.nufft_cpu.NUFFT_cpu
Non-uniform FFT Object for non-cartesian transformation. Default is None.
params : dict
Sequence parameters. Default is None.
Returns
-------
im : numpy.ndarray
Image data
'''
if cartesian_opt == 1:
im = npfft.ifft2(npfft.fftshift(ksp))
#im = npfft.ifftshift(npfft.ifft2(ksp))
elif cartesian_opt == 0:
if 'Npoints' not in params:
raise ValueError('The number of acquisition points is missing')
if 'Nshots' not in params:
raise ValueError('The number of shots is missing')
if 'dcf' in params:
ksp_dcf = ksp.reshape((params['Npoints']*params['Nshots'],))*params['dcf']
im = NufftObj.adjoint(ksp_dcf) # * np.prod(sqrt(4 * params['N'] ** 2))
else:
ksp_dcf = ksp.reshape((params['Npoints'] * params['Nshots'],))
im = NufftObj.solve(ksp_dcf, solver='cg', maxiter=50)
#im = NufftObj.solve(ksp_dcf, solver='L1TVOLS', maxiter=50, rho=0.1)
else:
raise ValueError('Cartesian option should be either 0 or 1')
return im
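# Illustrative round-trip sketch (added for clarity; not part of the original
# module). With the Cartesian option, ksp2im inverts im2ksp up to floating-point
# error; the non-Cartesian path additionally needs a NUFFT object built by
# nufft_init() below and a params dict with at least 'Npoints' and 'Nshots'.
#
#     import numpy as np
#     M = np.random.rand(64, 64)
#     M_rec = ksp2im(im2ksp(M, cartesian_opt=1), cartesian_opt=1)
#     np.allclose(M, M_rec)                # -> True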
def nufft_init(kt, params):
'''Initializes the Non-uniform FFT object
Parameters
----------
kt : numpy.ndarray
K-space trajectory
params : dict
Sequence parameters.
Returns
-------
NufftObj : pynufft.linalg.nufft_cpu.NUFFT_cpu
Non-uniform FFT Object for non-cartesian transformation
'''
if 'Npoints' not in params:
raise ValueError('The number of acquisition points is missing')
if 'Nshots' not in params:
raise ValueError('The number of shots is missing')
if 'N' not in params:
raise ValueError('The matrix size is missing')
kt_sc = pi / abs(np.max(kt))
kt = kt * kt_sc # pyNUFFT scaling [-pi, pi]
om = np.zeros((params['Npoints'] * params['Nshots'], 2))
om[:, 0] = np.real(kt).flatten()
om[:, 1] = np.imag(kt).flatten()
NufftObj = pynufft.NUFFT_cpu() # Create a pynufft object
Nd = (params['N'], params['N']) # image size
Kd = (2 * params['N'], 2 * params['N']) # k-space size
Jd = (6, 6) # interpolation size
NufftObj.plan(om, Nd, Kd, Jd)
return NufftObj | PypiClean |
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/uri_builder.py | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
try:
# Python3
from urllib.parse import (
urlparse, urljoin,
urlsplit, urlunsplit,
quote, unquote
)
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
# Python2
from urlparse import urlparse, urljoin, urlsplit, urlunsplit
from urllib2 import urlopen, Request, HTTPError
from urllib import quote, unquote
# From RFC 3986:
gen_delims = ":/?#[]@"
sub_delims = "!$&'()*+,;="
unreserved = "-._~"
# subset of above safe in query string (no "?", "&" or #")
query_safe = re.sub('[?&#]', '', gen_delims + sub_delims + unreserved)
def uri_base(uri):
"""
Get the base URI from the supplied URI by removing any parameters and/or fragments.
"""
base_uri = uri.split("#", 1)[0]
base_uri = base_uri.split("?", 1)[0]
return base_uri
def uri_query_key_val(p):
"""
Returns a key-value pair for a supplied query parameter string.
The value part returned has %-escaping unapplied.
If no '=' is present, the value part returned is an empty string.
"""
kv = p.split("=", 1) + [""]
return (kv[0], unquote(kv[1]))
def uri_param_dict(uri):
"""
Extract parameter dictionary from the supplied URI
>>> uri_param_dict("base:path?q1=p1&q2=p2#frag") == { 'q1': "p1", 'q2': "p2"}
True
>>> uri_param_dict("base:path?q1=p1=p1&q2=p2%26p2&q3") == { 'q1': "p1=p1", 'q2': "p2&p2", 'q3': "" }
True
"""
base_uri = uri.split("#", 1)[0]
query = (base_uri.split("?", 1)+[""])[1]
return { k: v for k, v in [ uri_query_key_val(qp) for qp in query.split("&") ] }
def build_dict(*param_dicts, **param_dict):
"""
Create a merged dictionary from the supplied dictionaries and keyword parameters.
"""
merged_param_dict = param_dict.copy()
for d in param_dicts:
if d is not None:
# log.info("param_dicts %r"%(d,))
merged_param_dict.update(d)
return merged_param_dict
def uri_params(*param_dicts, **param_dict):
"""
Construct a URI parameter string from the supplied dictionary values
(or values which are convertible to a dictionary using `dict()`).
"""
uri_param_dict = build_dict(*param_dicts, **param_dict)
uri_param_str = ""
next_sep = "?"
for pnam in uri_param_dict:
pval = uri_param_dict[pnam]
if pval:
# log.info("pnam %s, pval %s, uri_param_dict %r"%(pnam, pval, uri_param_dict))
uri_param_str += next_sep + pnam + "=" + quote(pval, query_safe)
next_sep = "&"
return uri_param_str
def uri_with_params(base_uri, *param_dicts, **param_dict):
"""
Construct a URI from the supplied base URI (with any parameters and/or fragment removed)
and URI parameters created using the supplied dictionary values.
"""
return uri_base(base_uri) + uri_params(*param_dicts, **param_dict)
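# Illustrative usage sketch (added for clarity; not part of the original module).
# Any query or fragment on the base URI is dropped before the new parameters are
# appended, and parameter values are %-escaped; empty values are omitted.
#
#     >>> uri_with_params("base:path?old=1#frag", q="a b", empty=None)
#     'base:path?q=a%20b'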
if __name__ == "__main__":
import doctest
doctest.testmod()
# End. | PypiClean |
/Cactus-3.3.3.tar.gz/Cactus-3.3.3/cactus/utils/helpers.py | import hashlib
from functools import partial
import six
class CaseInsensitiveDict(dict):
def __init__(self, obj = None, **kwargs):
if obj is not None:
if isinstance(obj, dict):
for k, v in obj.items():
self[k] = v
else:
for k, v in obj:
self[k] = v
for k, v in kwargs.items():
self[k] = v
def __setitem__(self, key, value):
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensitiveDict, self).__getitem__(key.lower())
def __delitem__(self, key):
return super(CaseInsensitiveDict, self).__delitem__(key.lower())
def __contains__(self, key):
return super(CaseInsensitiveDict, self).__contains__(key.lower())
def pop(self, key):
return super(CaseInsensitiveDict, self).pop(key.lower())
class memoize(object):
"""
    Memoize the return value of a function.
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
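# Illustrative usage sketch (added for clarity; not part of the original module).
# `memoize` is written for instance methods: repeated calls with the same
# arguments reuse a per-instance cache instead of recomputing. The class below
# is a hypothetical example.
#
#     class Circle(object):
#         def __init__(self, radius):
#             self.radius = radius
#
#         @memoize
#         def area(self):
#             print("computing...")        # printed only on the first call
#             return 3.14159 * self.radius ** 2
#
#     c = Circle(2)
#     c.area()                             # computes and caches
#     c.area()                             # served from the cache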
if six.PY3:
def map_apply(fn, iterable):
return list(map(fn, iterable))
else:
map_apply = map
def checksum(s):
"""
Calculate the checksum of a string.
Should eventually support files too.
We use MD5 because S3 does.
"""
return hashlib.md5(s).hexdigest()
def get_or_prompt(config, key, prompt_fn, *args, **kwargs):
"""
:param config: The configuration object to get the value from
:param key: The configuration key to retrieve
:type key: str
:param prompt_fn: The prompt function to use to prompt the value
:param args: Extra arguments for the prompt function
:param kwargs: Extra keyword arguments for hte prompt function
"""
value = config.get(key)
if value is None:
value = prompt_fn(*args, **kwargs)
config.set(key, value)
return value | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/MenuBar.js.uncompressed.js | require({cache:{
'url:dijit/templates/MenuBar.html':"<div class=\"dijitMenuBar dijitMenuPassive\" data-dojo-attach-point=\"containerNode\" role=\"menubar\" tabIndex=\"${tabIndex}\" data-dojo-attach-event=\"onkeypress: _onKeyPress\"></div>\n"}});
define("dijit/MenuBar", [
"dojo/_base/declare", // declare
"dojo/_base/event", // event.stop
"dojo/keys", // keys.DOWN_ARROW
"./_MenuBase",
"dojo/text!./templates/MenuBar.html"
], function(declare, event, keys, _MenuBase, template){
/*=====
var _MenuBase = dijit._MenuBase;
=====*/
// module:
// dijit/MenuBar
// summary:
// A menu bar, listing menu choices horizontally, like the "File" menu in most desktop applications
return declare("dijit.MenuBar", _MenuBase, {
// summary:
// A menu bar, listing menu choices horizontally, like the "File" menu in most desktop applications
templateString: template,
baseClass: "dijitMenuBar",
// _isMenuBar: [protected] Boolean
// This is a MenuBar widget, not a (vertical) Menu widget.
_isMenuBar: true,
postCreate: function(){
var l = this.isLeftToRight();
this.connectKeyNavHandlers(
l ? [keys.LEFT_ARROW] : [keys.RIGHT_ARROW],
l ? [keys.RIGHT_ARROW] : [keys.LEFT_ARROW]
);
// parameter to dijit.popup.open() about where to put popup (relative to this.domNode)
this._orient = ["below"];
},
focusChild: function(item){
// overload focusChild so that whenever the focus is moved to a new item,
// check the previous focused whether it has its popup open, if so, after
// focusing the new item, open its submenu immediately
var prev_item = this.focusedChild,
showpopup = prev_item && prev_item.popup && prev_item.popup.isShowingNow;
this.inherited(arguments);
if(showpopup && item.popup && !item.disabled){
this._openPopup(); // TODO: on down arrow, _openPopup() is called here and in onItemClick()
}
},
_onKeyPress: function(/*Event*/ evt){
// summary:
// Handle keyboard based menu navigation.
// tags:
// protected
if(evt.ctrlKey || evt.altKey){ return; }
switch(evt.charOrCode){
case keys.DOWN_ARROW:
this._moveToPopup(evt);
event.stop(evt);
}
},
onItemClick: function(/*dijit._Widget*/ item, /*Event*/ evt){
// summary:
// Handle clicks on an item. Cancels a dropdown if already open.
// tags:
// private
if(item.popup && item.popup.isShowingNow){
item.popup.onCancel();
}else{
this.inherited(arguments);
}
}
});
}); | PypiClean |
/DLTKDL-51.0.8-py3-none-any.whl/dltkdl/text/shallownlp/ner.py | from .imports import *
class NER:
def __init__(self, lang='en', predictor_path=None):
"""
pretrained NER.
        Only English, Chinese, and Russian are currently supported.
Args:
lang(str): Currently, one of {'en', 'zh', 'ru'}: en=English , zh=Chinese, or ru=Russian
"""
if lang is None:
raise ValueError('lang is required (e.g., "en" for English, "zh" for Chinese, "ru" for Russian, etc.')
if predictor_path is None and lang not in ['en', 'zh', 'ru']:
raise ValueError("Unsupported language: if predictor_path is None, then lang must be " +\
"'en' for English, 'zh' for Chinese, or 'ru' for Chinese")
self.lang = lang
if os.environ.get('DISABLE_V2_BEHAVIOR', None) != '1':
warnings.warn("Please add os.environ['DISABLE_V2_BEHAVIOR'] = '1' at top of your script or notebook")
msg = "\nNER in ktrain uses the CRF module from keras_contrib, which is not yet\n" +\
"fully compatible with TensorFlow 2. To use NER, you must add the following to the top of your\n" +\
"script or notebook BEFORE you import ktrain (after restarting runtime):\n\n" +\
"import os\n" +\
"os.environ['DISABLE_V2_BEHAVIOR'] = '1'\n"
print(msg)
return
else:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if predictor_path is None and self.lang == 'zh':
dirpath = os.path.dirname(os.path.abspath(__file__))
fpath = os.path.join(dirpath, 'ner_models/ner_chinese')
elif predictor_path is None and self.lang == 'ru':
dirpath = os.path.dirname(os.path.abspath(__file__))
fpath = os.path.join(dirpath, 'ner_models/ner_russian')
elif predictor_path is None and self.lang=='en':
dirpath = os.path.dirname(os.path.abspath(__file__))
fpath = os.path.join(dirpath, 'ner_models/ner_english')
elif predictor_path is None:
raise ValueError("Unsupported language: if predictor_path is None, then lang must be " +\
"'en' for English, 'zh' for Chinese, or 'ru' for Chinese")
else:
if not os.path.isfile(predictor_path) or not os.path.isfile(predictor_path +'.preproc'):
raise ValueError('could not find a valid predictor model '+\
'%s or valid Preprocessor %s at specified path' % (predictor_path, predictor_path+'.preproc'))
fpath = predictor_path
try:
import io
from contextlib import redirect_stdout
f = io.StringIO()
with redirect_stdout(f):
import ktrain
except:
raise ValueError('ktrain could not be imported. Install with: pip install ktrain')
self.predictor = ktrain.load_predictor(fpath)
def predict(self, texts, merge_tokens=True):
"""
Extract named entities from supplied text
Args:
texts (list of str or str): list of texts to annotate
merge_tokens(bool): If True, tokens will be merged together by the entity
to which they are associated:
('Paul', 'B-PER'), ('Newman', 'I-PER') becomes ('Paul Newman', 'PER')
"""
if isinstance(texts, str): texts = [texts]
results = []
for text in texts:
text = text.strip()
result = self.predictor.predict(text, merge_tokens=merge_tokens)
#if merge_tokens:
#result = self.merge_tokens(result)
results.append(result)
        if len(results) == 1: results = results[0]
        return results
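    # Illustrative usage sketch (added for clarity; not part of the original
    # module), assuming the bundled English model and its dependencies are
    # installed and loadable:
    #
    #     ner = NER(lang='en')
    #     ner.predict('Paul Newman was born in Cleveland.')
    #     # -> something like [('Paul Newman', 'PER'), ('Cleveland', 'LOC')]
    #     #    when merge_tokens=True (exact labels depend on the model)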
# 2020-04-30: moved to text.ner.predictor
#def merge_tokens(self, annotated_sentence):
# if self.lang.startswith('zh'):
# sep = ''
# else:
# sep = ' '
# current_token = ""
# current_tag = ""
# entities = []
# for tup in annotated_sentence:
# token = tup[0]
# entity = tup[1]
# tag = entity.split('-')[1] if '-' in entity else None
# prefix = entity.split('-')[0] if '-' in entity else None
# # not within entity
# if tag is None and not current_token:
# continue
# # beginning of entity
# #elif tag and prefix=='B':
# elif tag and (prefix=='B' or prefix=='I' and not current_token):
# if current_token: # consecutive entities
# entities.append((current_token, current_tag))
# current_token = ""
# current_tag = None
# current_token = token
# current_tag = tag
# # end of entity
# elif tag is None and current_token:
# entities.append((current_token, current_tag))
# current_token = ""
# current_tag = None
# continue
# # within entity
# elif tag and current_token: # prefix I
# current_token = current_token + sep + token
# current_tag = tag
# return entities | PypiClean |
/LARRY_dataset-0.0.2rc0-py3-none-any.whl/larry/_analysis/_growth_rate_grouped_violin_plot.py | __module_name__ = "_violin_plot.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["[email protected]"])
# import packages: ------------------------------------------------------------
import matplotlib.pyplot as plt
import vinplots
import numpy as np
import anndata
# data prep function: ---------------------------------------------------------
def prepare_data_for_violin_plot(
adata: anndata.AnnData, groupby: str = "d4_d6_major_fate_annot"
) -> dict:
"""
Prep data for the cell fate violin plot.
Parameters:
-----------
adata
type: anndata.AnnData
groupby
type: str
default: "d4_d6_major_fate_annot"
Returns:
--------
ViolinPlotDataDict
type: dict
"""
df = adata.uns["d2_lineage_cells_metadata"]
data_keys = list(adata.uns["GrowthRateDict"].keys())
ViolinPlotDataDict = {}
for key in data_keys:
ViolinPlotDataDict[key] = {}
for group, group_df in df.groupby(groupby):
ViolinPlotDataDict[key][group] = group_df[key].values
return ViolinPlotDataDict
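# Illustrative usage sketch (added for clarity; not part of the original module).
# The AnnData object is assumed to carry adata.uns["d2_lineage_cells_metadata"]
# (a DataFrame holding the groupby column plus one column per growth-rate key)
# and adata.uns["GrowthRateDict"], as used above.
#
#     data = prepare_data_for_violin_plot(adata)   # `adata` is a hypothetical AnnData
#     list(data.keys())                            # == list(adata.uns["GrowthRateDict"].keys())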
# ViolinPlot class: -----------------------------------------------------------
class ViolinPlot(vinplots.Plot):
def __init__(self, nplots=3, ncols=3, figsize=1.2, kwargs={}):
self._nplots = nplots
self._ncols = ncols
self._figsize = figsize
self._size_major = 6
self._size_minor = 6
self._rm_xticks = True
self._rm_yticks = False
self._mk_plot(**kwargs)
def _format_ticks(
self, ax, size_major=6, size_minor=6, rm_xticks=True, rm_yticks=False
):
ax.tick_params(axis="both", which="major", labelsize=size_major)
ax.tick_params(axis="both", which="minor", labelsize=size_minor)
if rm_xticks:
ax.set_xticks([])
if rm_yticks:
ax.set_yticks([])
def _mk_plot(self, **kwargs):
self.construct(
nplots=self._nplots, ncols=self._ncols, figsize=self._figsize, **kwargs
)
self.modify_spines(ax="all", spines_to_delete=["top", "right", "bottom"])
self.axes = self.linearize()
for ax in self.axes:
self._format_ticks(
ax,
size_major=self._size_major,
size_minor=self._size_minor,
rm_xticks=self._rm_xticks,
rm_yticks=self._rm_yticks,
)
# plot supporting functions class: --------------------------------------------
def _set_violinplot_line_colors(violin_plot, colors=None):
line_keys = ["cmaxes", "cmins", "cmeans", "cbars"]
for key in line_keys:
if key in violin_plot.keys():
c_colors = violin_plot[key].get_color()
if not colors:
colors = np.full(len(c_colors.flatten()), "k")
violin_plot[key].set_color(colors)
def _annotate_celltype(ax, n, height, celltype, celltype_color):
ax.text(
x=(n + 1),
y=height,
s=celltype,
c=celltype_color,
ha="center",
fontsize=8,
fontfamily="arial",
)
def _annotate_cell_count(ax, n, depth, n_pts, celltype_color):
ax.text(
x=(n + 1),
y=depth,
s="n = {}".format(n_pts),
c=celltype_color,
ha="center",
fontsize=6,
fontfamily="arial",
)
# Main violin_plot function: --------------------------------------------------
def growth_rate_grouped_violin_plot(adata, ncols=3):
ViolinPlotDataDict = prepare_data_for_violin_plot(adata)
fig = ViolinPlot(ncols=ncols, kwargs={"wspace": 0.1})
for n, (key, dataset) in enumerate(ViolinPlotDataDict.items()):
title = "{}/{} Growth Rate (Observed) per dominate cell fate".format(
key.split("_")[0], key.split("_")[1]
)
data = list(dataset.values())
groups = list(dataset.keys())
panel = fig.axes[n].violinplot(dataset=data)
colors = [vinplots.colors.LARRY_in_vitro[key] for key in groups]
_set_violinplot_line_colors(panel, colors)
ax = fig.axes[n]
ax.set_title(title, fontsize=10, fontfamily="arial", y=1.05)
ax.set_ylim(-2, 2)
for n, body in enumerate(panel["bodies"]):
celltype = groups[n]
celltype_color = vinplots.colors.LARRY_in_vitro[celltype]
n_pts = len(dataset[celltype])
x_noise = np.random.normal(0, 0.05, n_pts)
ax.scatter(
x_noise + (n + 1),
dataset[celltype],
c=celltype_color,
s=25,
alpha=0.25,
zorder=2,
)
height = max(dataset[celltype]) + 0.15
depth = min(dataset[celltype]) - 0.15
_annotate_celltype(ax, n, height, celltype, celltype_color)
_annotate_cell_count(ax, n, depth, n_pts, celltype_color)
body.set_facecolor(celltype_color)
body.set_edgecolor(celltype_color)
body.set_alpha(0.65) | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/README.md | # MathJax <img alt="GitHub package.json version (branch)" src="https://img.shields.io/github/package-json/v/mathjax/MathJax/legacy-v2.svg">
## Beautiful math in all browsers
MathJax is an open-source JavaScript display engine for LaTeX, MathML, and
AsciiMath notation that works in all modern browsers. It was designed with
the goal of consolidating the recent advances in web technologies into a
single, definitive, math-on-the-web platform supporting the major browsers
and operating systems. It requires no setup on the part of the user (no
plugins to download or software to install), so the page author can write
web documents that include mathematics and be confident that users will be
able to view it naturally and easily. Simply include MathJax and some
mathematics in a web page, and MathJax does the rest.
Some of the main features of MathJax include:
- High-quality display of LaTeX, MathML, and AsciiMath notation in HTML pages
- Supported in most browsers with no plug-ins, extra fonts, or special
setup for the reader
- Easy for authors, flexible for publishers, extensible for developers
- Supports math accessibility, cut-and-paste interoperability, and other
advanced functionality
- Powerful API for integration with other web applications
See <http://www.mathjax.org/> for additional details.
## Installation and Usage
The MathJax installation and usage documentation is available in a
separate GitHub repository at <https://github.com/mathjax/mathjax-docs>.
The HTML versions can now be viewed at <http://docs.mathjax.org/>,
where it is possible for you to submit corrections and modifications
directly to the documentation on line.
## Community
The main MathJax website is <http://www.mathjax.org>, and it includes
announcements and other important information. MathJax is maintained and
distributed on GitHub at <http://github.com/mathjax/MathJax>. A user forum
for asking questions and getting assistance is hosted at Google, and the
bug tracker is hosted at GitHub:
Bug tracker: <https://github.com/mathjax/MathJax/issues>
MathJax-Users Group: <http://groups.google.com/group/mathjax-users>
Before reporting a bug, please check that it has not already been reported.
Also, please use the bug tracker for reporting bugs rather than the help forum.
| PypiClean |
/HydroSensorReader-1.7.6.tar.gz/HydroSensorReader-1.7.6/docs/README.md | # HydroSensorReader
[](https://www.python.org/downloads/release/python-360/)
[](https://opensource.org/licenses/MIT)
[](https://pypi.org/project/HydroSensorReader/)
[](https://ci.appveyor.com/project/jnsebgosselin/hydrosensorreader/branch/master)
[](https://codecov.io/gh/cgq-qgc/HydroSensorReader)
This project aims to provide a simple way to read scientific files produced by
any kind of probe, sensor, or anything used in hydrogeology.
## Installation
You can directly install this package with the command:
`pip install HydroSensorReader`.
After the installation, you can use the package by using
```python
import hydsensread as hsr
# File based reader
file_path = 'my_file_path'
# Files Generating Timeseries results
# =====================================
# read CR1000 files
r = hsr.DATCampbellCRFileReader(file_path)
# read Hanna multiparameter probes
# - (model HI-9828 and HI-9829 tested)
# - Supported extension : '.xls', '.xlsx'
r = hsr.XLSHannaFileReader(file_path)
# Read Solinst Levelogger and Barologger files
# - Supported extension : '.lev', '.xle', '.csv'
r = hsr.SolinstFileReader(file_path)
# Plot the results with
r.plot()
# Files Generating Generic results
# =====================================
# read Maxxam laboratory analysis files.
# - Supported extension : '.xls', '.xlsx'
r = hsr.XSLMaxxamFileReader(file_path)
# Web data scrapers
# These data scrapers use the station name.
station = 'StationName'
r = hsr.GNBWaterQualityStation(station)
```
## Dependencies
- [openpyxl](https://openpyxl.readthedocs.io/en/default/)
- [xlrd](http://www.python-excel.org/)
- [xlwt](http://www.python-excel.org/)
- [beautifulsoup4](https://www.crummy.com/software/BeautifulSoup/)
- [requests](http://docs.python-requests.org/en/master/)
- [pandas](https://pandas.pydata.org/)
- [matplotlib](https://matplotlib.org/)
- [numpy](http://www.numpy.org/)
## Main package definition
### `file_reader`
Implementation of different file readers.
_Each python file contains a main launcher to the class._
* __compagny_file_reader__
 _- Reader of generated files coming from different probes or labs._
* __web_page_reader__
_- Web crawlers in charge of extracting data from web sites_
### `file_parser`
This package contains the classes in charge of reading the different files.
More information about these packages is available in the code.
* __abstract_file_parser.py__
* __AbstractFileParser__
_- Abstract class used as an interface to implement the others_
* __concrete_file_parser.py__
* __CSVFileParser__
* __TXTFileParser__
* __EXCELFileParser__
* __WEB_XMLFileParser__
### `site_and_records`
This package contains classes defining the domain elements and the data describing them
* __site.py__
* __Site__
 _- A basic site class with the site name and the visited date_
* __SensorPlateform__
 _- A Plateform is a standalone object that can take measurements on its own_
* __Sample__
_- Sample as an object manipulated by a laboratory_
  * __StationSite__ - Modelling of a station object
* __StreamFlowStation__ - This specialized class was created to store the information of the [ECCC website](http://climate.weather.gc.ca/historical_data/search_historic_data_e.html)
* __records.py__
* __Parameter__ - Class acting as a structure to store a parameter value (what is observed) and its associated unit
* __Record__
_- A record must have a date, a parameter, a unit, and a value._
* __TimeSeriesRecords__
_- The record_date corresponds to the first date of the list of values. Values are stored as a Dict as follows :_
- { date1: value1, date2: value2,...}
* __ChemistryRecord__
 _- A chemistry record has a detection limit, a report date, an analysis type and all the attributes of a __Record___
### `file_example`
This folder contains several examples of files for testing.
## Work To Do
- Add a `.LAS` reader to take care of __borehole geophysics__ files
- Add a `.LAS` reader to take care of __LiDAR__ data
- Create a Strategy class so you can input a file and the strategy class select the correct `file_reader` class
- Continue documentation... always...
| PypiClean |
/FFC-2017.1.0.tar.gz/FFC-2017.1.0/ffc/wrappers.py |
# Copyright (C) 2010-2016 Anders Logg
#
# This file is part of FFC.
#
# FFC is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FFC. If not, see <http://www.gnu.org/licenses/>.
# Python modules
from itertools import chain
# FFC modules
from ffc.log import begin, end, info, error
from ffc.utils import all_equal
from ffc.cpp import make_classname
from ffc.backends.dolfin.wrappers import generate_dolfin_code
from ffc.backends.dolfin.capsules import UFCElementNames, UFCFormNames
__all__ = ["generate_wrapper_code"]
# FIXME: More clean-ups needed here.
def generate_wrapper_code(analysis, prefix, object_names, parameters):
"Generate code for additional wrappers."
# Skip if wrappers not requested
if not parameters["format"] == "dolfin":
return None
# Return dolfin wrapper
return _generate_dolfin_wrapper(analysis, prefix, object_names, parameters)
def _generate_dolfin_wrapper(analysis, prefix, object_names, parameters):
begin("Compiler stage 4.1: Generating additional wrapper code")
# Encapsulate data
(capsules, common_space) = _encapsulate(prefix, object_names, analysis, parameters)
# Generate code
info("Generating wrapper code for DOLFIN")
code = generate_dolfin_code(prefix, "",
capsules, common_space,
error_control=parameters["error_control"])
code += "\n\n"
end()
return code
def _encapsulate(prefix, object_names, analysis, parameters):
# Extract data from analysis
form_datas, elements, element_map, domains = analysis
# FIXME: Encapsulate domains?
num_form_datas = len(form_datas)
common_space = False
# Special case: single element
if num_form_datas == 0:
capsules = _encapsule_element(prefix, elements)
# Special case: with error control
elif parameters["error_control"] and num_form_datas == 11:
capsules = [_encapsule_form(prefix, object_names, form_data, i, element_map)
for (i, form_data) in enumerate(form_datas[:num_form_datas - 1])]
capsules += [_encapsule_form(prefix, object_names, form_datas[-1], num_form_datas - 1,
element_map, "GoalFunctional")]
# Otherwise: generate standard capsules for each form
else:
capsules = [_encapsule_form(prefix, object_names, form_data, i, element_map) for
(i, form_data) in enumerate(form_datas)]
# Check if all argument elements are equal
elements = []
for form_data in form_datas:
elements += form_data.argument_elements
common_space = all_equal(elements)
return (capsules, common_space)
def _encapsule_form(prefix, object_names, form_data, i, element_map, superclassname=None):
element_numbers = [element_map[e] for e in form_data.argument_elements + form_data.coefficient_elements]
if superclassname is None:
superclassname = "Form"
form_names = UFCFormNames(
object_names.get(id(form_data.original_form), "%d" % i),
[object_names.get(id(obj), "w%d" % j) for j, obj in enumerate(form_data.reduced_coefficients)],
make_classname(prefix, "form", i),
[make_classname(prefix, "finite_element", j) for j in element_numbers],
[make_classname(prefix, "dofmap", j) for j in element_numbers],
superclassname)
return form_names
def _encapsule_element(prefix, elements):
element_number = len(elements) - 1 # eh? this doesn't make any sense
args = ("0",
[make_classname(prefix, "finite_element", element_number)],
[make_classname(prefix, "dofmap", element_number)])
return UFCElementNames(*args) | PypiClean |
/Authlib-1.2.1.tar.gz/Authlib-1.2.1/authlib/integrations/base_client/async_app.py | import time
import logging
from authlib.common.urls import urlparse
from .errors import (
MissingRequestTokenError,
MissingTokenError,
)
from .sync_app import OAuth1Base, OAuth2Base
log = logging.getLogger(__name__)
__all__ = ['AsyncOAuth1Mixin', 'AsyncOAuth2Mixin']
class AsyncOAuth1Mixin(OAuth1Base):
async def request(self, method, url, token=None, **kwargs):
async with self._get_oauth_client() as session:
return await _http_request(self, session, method, url, token, kwargs)
async def create_authorization_url(self, redirect_uri=None, **kwargs):
"""Generate the authorization url and state for HTTP redirect.
:param redirect_uri: Callback or redirect URI for authorization.
:param kwargs: Extra parameters to include.
:return: dict
"""
if not self.authorize_url:
raise RuntimeError('Missing "authorize_url" value')
if self.authorize_params:
kwargs.update(self.authorize_params)
async with self._get_oauth_client() as client:
client.redirect_uri = redirect_uri
params = {}
if self.request_token_params:
params.update(self.request_token_params)
request_token = await client.fetch_request_token(self.request_token_url, **params)
log.debug('Fetch request token: {!r}'.format(request_token))
url = client.create_authorization_url(self.authorize_url, **kwargs)
state = request_token['oauth_token']
return {'url': url, 'request_token': request_token, 'state': state}
async def fetch_access_token(self, request_token=None, **kwargs):
"""Fetch access token in one step.
:param request_token: A previous request token for OAuth 1.
:param kwargs: Extra parameters to fetch access token.
:return: A token dict.
"""
async with self._get_oauth_client() as client:
if request_token is None:
raise MissingRequestTokenError()
# merge request token with verifier
token = {}
token.update(request_token)
token.update(kwargs)
client.token = token
params = self.access_token_params or {}
token = await client.fetch_access_token(self.access_token_url, **params)
return token
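# Illustrative flow sketch (added for clarity; not part of the original module).
# A concrete client built on AsyncOAuth1Mixin would typically be driven like this
# from an async view; the client instance, callback URL and verifier names below
# are assumptions, not part of this module's API.
#
#     rv = await client.create_authorization_url(redirect_uri="https://example.com/cb")
#     # redirect the user to rv["url"] and persist rv["request_token"] under rv["state"]
#     token = await client.fetch_access_token(request_token=saved_request_token,
#                                             oauth_verifier=verifier)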
class AsyncOAuth2Mixin(OAuth2Base):
async def _on_update_token(self, token, refresh_token=None, access_token=None):
if self._update_token:
await self._update_token(
token,
refresh_token=refresh_token,
access_token=access_token,
)
async def load_server_metadata(self):
if self._server_metadata_url and '_loaded_at' not in self.server_metadata:
async with self.client_cls(**self.client_kwargs) as client:
resp = await client.request('GET', self._server_metadata_url, withhold_token=True)
resp.raise_for_status()
metadata = resp.json()
metadata['_loaded_at'] = time.time()
self.server_metadata.update(metadata)
return self.server_metadata
async def request(self, method, url, token=None, **kwargs):
metadata = await self.load_server_metadata()
async with self._get_oauth_client(**metadata) as session:
return await _http_request(self, session, method, url, token, kwargs)
async def create_authorization_url(self, redirect_uri=None, **kwargs):
"""Generate the authorization url and state for HTTP redirect.
:param redirect_uri: Callback or redirect URI for authorization.
:param kwargs: Extra parameters to include.
:return: dict
"""
metadata = await self.load_server_metadata()
authorization_endpoint = self.authorize_url or metadata.get('authorization_endpoint')
if not authorization_endpoint:
raise RuntimeError('Missing "authorize_url" value')
if self.authorize_params:
kwargs.update(self.authorize_params)
async with self._get_oauth_client(**metadata) as client:
client.redirect_uri = redirect_uri
return self._create_oauth2_authorization_url(
client, authorization_endpoint, **kwargs)
async def fetch_access_token(self, redirect_uri=None, **kwargs):
"""Fetch access token in the final step.
:param redirect_uri: Callback or Redirect URI that is used in
previous :meth:`authorize_redirect`.
:param kwargs: Extra parameters to fetch access token.
:return: A token dict.
"""
metadata = await self.load_server_metadata()
token_endpoint = self.access_token_url or metadata.get('token_endpoint')
async with self._get_oauth_client(**metadata) as client:
if redirect_uri is not None:
client.redirect_uri = redirect_uri
params = {}
if self.access_token_params:
params.update(self.access_token_params)
params.update(kwargs)
token = await client.fetch_token(token_endpoint, **params)
return token
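# Illustrative flow sketch (added for clarity; not part of the original module).
# The OAuth 2 mixin is driven the same way by a concrete framework client; the
# names below and the relative "userinfo" path (which assumes api_base_url is
# configured) are assumptions made only for the example.
#
#     rv = await client.create_authorization_url(redirect_uri="https://example.com/cb")
#     # redirect the user to rv["url"]; later, exchange the returned code:
#     token = await client.fetch_access_token(redirect_uri="https://example.com/cb",
#                                             code=authorization_code)
#     resp = await client.request("GET", "userinfo", token=token)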
async def _http_request(ctx, session, method, url, token, kwargs):
request = kwargs.pop('request', None)
withhold_token = kwargs.get('withhold_token')
if ctx.api_base_url and not url.startswith(('https://', 'http://')):
url = urlparse.urljoin(ctx.api_base_url, url)
if withhold_token:
return await session.request(method, url, **kwargs)
if token is None and ctx._fetch_token and request:
token = await ctx._fetch_token(request)
if token is None:
raise MissingTokenError()
session.token = token
return await session.request(method, url, **kwargs) | PypiClean |
/Cantonese-1.0.7-py3-none-any.whl/src/can_parser.py | from src.keywords import *
import src.can_ast as can_ast
class ParserBase(object):
def __init__(self, token_list : list) -> None:
self.pos = 0
self.tokens = token_list
def look_ahead(self, step = 1) -> list:
return self.tokens[self.pos + step]
def current(self) -> list:
return self.look_ahead(0)
def error(self, f, *args):
err = f
err = '{0}: {1}'.format(self.tokens[self.pos],
err)
raise Exception(err)
def get_next_token_of_kind(self, k, step = 1) -> list:
tk = self.look_ahead(step)
if k != tk[1][0]:
            err = 'Line %s: %s附近睇唔明啊大佬!!! Expected: %s' % (str(tk[0]), str(tk[1][1]), str(k))
self.error(err)
self.pos += 1
return tk
def get_next_token_of(self, expectation : str, step = 1) -> list:
tk = self.look_ahead(step)
if isinstance(expectation, list):
if tk[1][1] not in expectation:
err = 'Line {0}: 睇唔明嘅语法: {1}系唔系"{2}"啊?'.format(tk[0], tk[1][1], expectation)
self.error(err)
self.pos += 1
return tk
else:
if expectation != tk[1][1]:
err = 'Line {0}: 睇唔明嘅语法: {1}系唔系"{2}"啊?'.format(tk[0], tk[1][1], expectation)
self.error(err)
self.pos += 1
return tk
def skip(self, step) -> None:
self.pos += step
def get_line(self) -> int:
return self.tokens[self.pos][0]
def get_type(self, token : list) -> TokenType:
return token[1][0]
def get_token_value(self, token : list) -> str:
return token[1][1]
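# Illustrative note (added for clarity; not part of the original module). Judging
# from the accessors above, each entry of `token_list` is assumed to have the shape
#
#     [line_number, (TokenType.XXX, value_string)]
#
# so a hypothetical identifier token on line 3 would look like
#
#     [3, (TokenType.IDENTIFIER, "foo")]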
class ExpParser(ParserBase):
def __init__(self, token_list : list) -> None:
super(ExpParser, self).__init__(token_list)
self.pos = 0
self.tokens = token_list
def parse_exp_list(self):
exps = [self.parse_exp()]
while self.get_type(self.current()) == TokenType.SEP_COMMA:
self.skip(1)
exps.append(self.parse_exp())
return exps
"""
exp ::= null
| false
| true
| Numeral
| LiteralString
| functoindef
| prefixexp
| exp binop exp
| unop exp
| '<*>'
"""
def parse_exp(self):
# parse the expr by such precedence: exp13 > exp12 > exp11 > ... > exp0
# TODO: build a precedence map
return self.parse_exp13()
# exp1 ==> exp2
def parse_exp13(self):
exp = self.parse_exp12()
if self.get_token_value(self.current()) == '==>':
self.skip(1)
exp = can_ast.MappingExp(exp, self.parse_exp12())
return exp
# exp1 or exp2
def parse_exp12(self):
exp = self.parse_exp11()
while self.get_token_value(self.current()) in ['or', '或者']:
line = self.current()[0]
self.skip(1)
exp = can_ast.BinopExp(line, 'or', exp, self.parse_exp11())
return exp
# exp1 and exp2
def parse_exp11(self):
exp = self.parse_exp10()
while self.get_token_value(self.current()) in ['and', '同埋']:
line = self.current()[0]
self.skip(1)
exp = can_ast.BinopExp(line, 'and', exp, self.parse_exp10())
return exp
# Compare
def parse_exp10(self):
exp = self.parse_exp9()
while True:
now = self.current()
if self.get_token_value(now) in ('>', '>=', '<', '<=', '==', '!=', kw_is):
line, op = now[0], self.get_token_value(now)
self.skip(1)
exp = can_ast.BinopExp(line, op if op != kw_is else '==', exp, self.parse_exp9())
elif self.get_token_value(now) in ('in', kw_in, tr_kw_in):
line = now[0]
self.skip(1)
exp = can_ast.BinopExp(line, ' in ', exp, self.parse_exp9())
elif self.get_token_value(now) == '比唔上':
line = now[0]
self.skip(1)
exp = can_ast.BinopExp(line, '<', exp, self.parse_exp9())
else:
break
return exp
# exp1 <|> exp2
def parse_exp9(self):
exp = self.parse_exp8()
while self.get_type(self.current()) == TokenType.OP_BOR or \
self.get_token_value(self.current()) == "或":
line = self.current()[0]
self.skip(1)
exp = can_ast.BinopExp(line, '|', exp, self.parse_exp8())
return exp
# exp1 ^ exp2
def parse_exp8(self):
exp = self.parse_exp7()
while self.get_type(self.current()) == TokenType.OP_WAVE or \
self.get_token_value(self.current()) == "异或":
line = self.current()[0]
self.skip(1)
exp = can_ast.BinopExp(line, '^', exp, self.parse_exp8())
return exp
# exp1 & exp2
def parse_exp7(self):
exp = self.parse_exp6()
while self.get_type(self.current()) == TokenType.OP_BAND or \
self.get_token_value(self.current()) == '与':
line = self.current()[0]
self.skip(1)
exp = can_ast.BinopExp(line, '&', exp, self.parse_exp8())
return exp
# shift
def parse_exp6(self):
exp = self.parse_exp5()
if self.get_type(self.current()) in (TokenType.OP_SHL, TokenType.OP_SHR):
line = self.current()[0]
op = self.get_token_value(self.current())
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp5())
elif self.get_token_value(self.current()) == '左移':
line, op = self.current()[0], "<<"
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp5())
elif self.get_token_value(self.current()) == '右移':
line, op = self.current()[0], ">>"
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp5())
else:
return exp
return exp
# exp1 <-> exp2
def parse_exp5(self):
exp = self.parse_exp4()
if (self.get_type(self.current()) != TokenType.OP_CONCAT):
return exp
line = 0
exps = [exp]
while self.get_type(self.current()) == TokenType.OP_CONCAT:
line = self.current()[0]
self.skip(1)
exps.append(self.parse_exp4())
return can_ast.ConcatExp(line, exps)
# exp1 + / - exp2
def parse_exp4(self):
exp = self.parse_exp3()
while True:
if self.get_type(self.current()) in (TokenType.OP_ADD, TokenType.OP_MINUS):
line, op = self.current()[0], self.get_token_value(self.current())
self.skip(1) # skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp3())
elif self.get_token_value(self.current()) == '加':
line = self.current()[0]
self.skip(1) # skip the op
exp = can_ast.BinopExp(line, '+', exp, self.parse_exp3())
elif self.get_token_value(self.current()) == '减':
line = self.current()[0]
self.skip(1) # skip the op
exp = can_ast.BinopExp(line, '-', exp, self.parse_exp3())
else:
break
return exp
# *, %, /, //
def parse_exp3(self):
exp = self.parse_exp2()
while True:
if self.get_type(self.current()) in (TokenType.OP_MUL, TokenType.OP_MOD,
TokenType.OP_DIV, TokenType.OP_IDIV):
line, op = self.current()[0], self.get_token_value(self.current())
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp2())
elif self.get_token_value(self.current()) == '乘':
line = self.current()[0]
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, '*', exp, self.parse_exp2())
elif self.get_token_value(self.current()) == '余':
line = self.current()[0]
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, '%', exp, self.parse_exp2())
elif self.get_token_value(self.current()) == '整除':
line = self.current()[0]
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, '//', exp, self.parse_exp2())
elif self.get_token_value(self.current()) == '除':
line = self.current()[0]
self.skip(1) # Skip the op
                exp = can_ast.BinopExp(line, '/', exp, self.parse_exp2())
else:
break
return exp
# unop exp
def parse_exp2(self):
if self.get_type(self.current()) == TokenType.OP_NOT or \
self.get_token_value(self.current()) == 'not' or \
self.get_token_value(self.current()) == '-' or \
self.get_token_value(self.current()) == '~':
line, op = self.current()[0], self.get_token_value(self.current())
self.skip(1) # Skip the op
exp = can_ast.UnopExp(line, op, self.parse_exp2())
return exp
        elif self.get_token_value(self.current()) == '取反':
line, op = self.current()[0], '~'
self.skip(1) # Skip the op
exp = can_ast.UnopExp(line, op, self.parse_exp2())
return exp
return self.parse_exp1()
# x ** y
def parse_exp1(self):
exp = self.parse_exp0()
if self.get_type(self.current()) == TokenType.OP_POW:
line, op = self.current()[0], self.get_token_value(self.current())
self.skip(1) # Skip the op
exp = can_ast.BinopExp(line, op, exp, self.parse_exp2())
return exp
def parse_exp0(self):
tk = self.current()
if self.get_token_value(tk) == '<*>':
self.skip(1)
return can_ast.VarArgExp(tk[0])
elif self.get_token_value(tk) in [kw_false, tr_kw_false, "False"]:
self.skip(1)
return can_ast.FalseExp(tk[0])
elif self.get_token_value(tk) in [kw_true, tr_kw_true, "True"]:
self.skip(1)
return can_ast.TrueExp(tk[0])
elif self.get_token_value(tk) in [kw_none, tr_kw_none, "None"]:
self.skip(1)
return can_ast.NullExp(tk[0])
elif self.get_type(tk) == TokenType.NUM:
self.skip(1)
return can_ast.NumeralExp(tk[0], self.get_token_value(tk))
elif self.get_type(tk) == TokenType.STRING:
self.skip(1)
return can_ast.StringExp(tk[0], self.get_token_value(tk))
elif self.get_type(tk) == TokenType.SEP_LBRACK:
return self.parse_listcons()
elif self.get_type(tk) == TokenType.SEP_LCURLY:
return self.parse_mapcons()
# lambda function
elif self.get_token_value(tk) == '$$':
return self.parse_functiondef_expr()
# If-Else expr
elif self.get_token_value(tk) in [kw_expr_if, tr_kw_expr_if]:
return self.parse_if_else_expr()
return self.parse_prefixexp()
"""
prefixexp ::= var
| '(' exp ')'
| '|' exp '|'
| '<|' id '|>'
| functioncall
| id '=' exp
var ::= id
| prefixexp '[' exp ']'
| prefixexp '->' id
"""
def parse_prefixexp(self):
if self.get_type(self.current()) == TokenType.IDENTIFIER:
line, name = self.current()[0], self.get_token_value(self.current())
self.skip(1)
exp = can_ast.IdExp(line, name)
elif self.get_type(self.current()) == TokenType.SEPCIFIC_ID_BEG:
self.skip(1)
name = self.get_token_value(self.get_next_token_of_kind(TokenType.IDENTIFIER, 0))
exp = can_ast.SpecificIdExp(name)
self.get_next_token_of_kind(TokenType.SEPICFIC_ID_END, 0)
# '(' exp ')'
elif self.get_type(self.current()) == TokenType.SEP_LPAREN:
exp = self.parse_parens_exp()
# '|' exp '|'
else:
exp = self.parse_brack_exp()
return self.finish_prefixexp(exp)
def parse_parens_exp(self):
self.get_next_token_of_kind(TokenType.SEP_LPAREN, 0)
exp = self.parse_exp()
self.get_next_token_of_kind(TokenType.SEP_RPAREN, 0)
return exp
def parse_brack_exp(self):
self.get_next_token_of('|', 0)
exp = self.parse_exp()
self.get_next_token_of('|', 0)
return exp
"""
listcons := '[' exp_list ']'
"""
def parse_listcons(self):
self.get_next_token_of_kind(TokenType.SEP_LBRACK, 0)
if self.get_type(self.current()) == TokenType.SEP_RBRACK: # []
self.skip(1)
return can_ast.ListExp("")
else:
exps = self.parse_exp_list()
self.get_next_token_of_kind(TokenType.SEP_RBRACK, 0)
return can_ast.ListExp(exps)
"""
set_or_mapcons := '{' exp_list '}'
"""
def parse_mapcons(self):
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
if self.get_type(self.current()) == TokenType.SEP_RCURLY: # {}
self.skip(1)
return can_ast.MapExp("")
else:
exps = self.parse_exp_list()
self.get_next_token_of_kind(TokenType.SEP_RCURLY, 0)
return can_ast.MapExp(exps)
def finish_prefixexp(self, exp : can_ast.AST):
while True:
kind, value = self.get_type(self.current()), self.get_token_value(self.current())
if kind == TokenType.SEP_LBRACK:
self.skip(1)
key_exp : can_ast.AST = self.parse_exp()
self.get_next_token_of_kind(TokenType.SEP_RBRACK, 0)
exp = can_ast.ListAccessExp(self.get_line(), exp, key_exp)
elif kind == TokenType.SEP_DOT or \
(kind == TokenType.KEYWORD and value == kw_do):
if self.get_type(self.look_ahead(1)) == TokenType.SEP_LCURLY:
# || -> { ... } means we in a method define statement. So break it.
break
# Otherwise it's a ObjectAccessExp
else:
self.skip(1)
tk = self.get_next_token_of_kind(TokenType.IDENTIFIER, 0)
line, name = tk[0], self.get_token_value(tk)
key_exp = can_ast.IdExp(line, name)
exp = can_ast.ObjectAccessExp(line, exp, key_exp)
elif (kind == TokenType.SEP_LPAREN) or \
(kind == TokenType.KEYWORD and value == kw_call_begin):
exp = self.finish_functioncall_exp(exp)
elif value == '嘅长度':
self.skip(1)
key_exp = can_ast.IdExp(self.get_line(), '__len__()')
exp = can_ast.ObjectAccessExp(self.get_line(), exp, key_exp)
elif kind == TokenType.OP_ASSIGN:
self.skip(1)
exp = can_ast.AssignExp(exp, self.parse_exp())
# TODO: Fix bugs here
elif value in [kw_get_value, tr_kw_get_value]:
self.skip(1)
exp = can_ast.AssignExp(self.parse_exp(), exp)
else:
break
return exp
"""
functioncall ::= prefixexp '下' '->' args
| prefixexp args
"""
def finish_functioncall_exp(self, prefix_exp : can_ast.AST):
if (self.get_token_value(self.current()) == kw_call_begin):
self.skip(1)
self.get_next_token_of(kw_do, 0)
line = self.get_line()
args = self.parse_args()
last_line = self.get_line()
return can_ast.FuncCallExp(prefix_exp, args)
else:
line = self.get_line()
args = self.parse_args()
last_line = self.get_line()
return can_ast.FuncCallExp(prefix_exp, args)
def parse_parlist(self):
if self.get_type(self.current()) == TokenType.IDENTIFIER or \
self.get_token_value(self.current()) == '<*>':
par_parser = ParExpParser(self.tokens[self.pos : ])
exps = par_parser.parse_exp_list()
self.skip(par_parser.pos)
del par_parser # free the memory
return exps
elif self.get_token_value(self.current()) == '|':
self.skip(1)
par_parser = ParExpParser(self.tokens[self.pos : ])
exps = par_parser.parse_exp_list()
self.skip(par_parser.pos)
del par_parser # free the memory
self.get_next_token_of('|', 0)
return exps
else:
return []
"""
idlist ::= id [',', id]
| '|' id [',', id] '|'
"""
def parse_idlist(self):
tk = self.current()
if (self.get_type(tk) == TokenType.IDENTIFIER):
ids = [can_ast.IdExp(self.get_line(),
self.get_token_value(self.get_next_token_of_kind(TokenType.IDENTIFIER, 0)))]
while self.get_type(self.current()) == TokenType.SEP_COMMA:
self.skip(1)
if (self.get_type(self.current()) != TokenType.IDENTIFIER):
self.error("Excepted identifier type in idlist!")
ids.append(can_ast.IdExp(self.get_line(),
self.get_token_value(self.current())))
self.skip(1)
return ids
elif (self.get_token_value(tk) == '|'):
self.skip(1)
ids = [can_ast.IdExp(self.get_line(),
self.get_token_value(self.get_next_token_of_kind(TokenType.IDENTIFIER, 0)))]
while self.get_type(self.current()) == TokenType.SEP_COMMA:
self.skip(1)
if (self.get_type(self.current()) != TokenType.IDENTIFIER):
self.error("Excepted identifier type in idlist!")
ids.append(can_ast.IdExp(self.get_line(),
self.get_token_value(self.current())))
self.skip(1)
self.get_next_token_of('|', 0)
return ids
"""
lambda_functoindef ::= '$$' idlist '->' block '搞掂'
"""
def parse_functiondef_expr(self):
self.skip(1)
idlist : list = self.parse_idlist()
blocks : list = []
self.get_next_token_of(kw_do, 0)
blocks.append(self.parse_exp())
return can_ast.LambdaExp(idlist, blocks)
def parse_if_else_expr(self):
self.skip(1)
CondExp : can_ast.AST = self.parse_exp()
self.get_next_token_of([kw_do, tr_kw_do], 0)
IfExp : can_ast.AST = self.parse_exp()
self.get_next_token_of([kw_expr_else, tr_kw_expr_else], 0)
self.get_next_token_of([kw_do, tr_kw_do], 0)
ElseExp : can_ast.AST = self.parse_exp()
return can_ast.IfElseExp(CondExp, IfExp, ElseExp)
"""
args ::= '|' explist '|'
| '(' {explist} ')'
| explist
| LiteralString
| Numeral
| id
"""
def parse_args(self):
args = []
tk = self.current()
if self.get_token_value(tk) == '(':
self.skip(1)
if self.get_token_value(self.current()) != ')':
args = self.parse_exp_list()
self.get_next_token_of_kind(TokenType.SEP_RPAREN, step = 0)
elif self.get_token_value(tk) == '|':
self.skip(1)
if self.get_token_value(self.current()) != '|':
args = self.parse_exp_list()
self.get_next_token_of('|', step = 0)
else:
args = [self.parse_exp()]
return args
"""
parlist ::= id [',', id | '<*>']
| id '=' exp [',' id]
| '<*>'
"""
class ParExpParser(ExpParser):
def __init__(self, token_list: list) -> None:
super().__init__(token_list)
# override
def parse_exp(self):
return self.parse_exp0()
# override
def parse_exp0(self):
tk = self.current()
if self.get_token_value(tk) == '<*>':
self.skip(1)
return can_ast.VarArgExp(tk[0])
return self.parse_prefixexp()
# override
def parse_prefixexp(self):
if self.get_type(self.current()) == TokenType.IDENTIFIER:
line, name = self.current()[0], self.get_token_value(self.current())
self.skip(1)
exp = can_ast.IdExp(line, name)
return self.finish_prefixexp(exp)
else:
raise Exception("Parlist must be a identifier type!")
# override
def finish_prefixexp(self, exp: can_ast.AST):
kind = self.get_type(self.current())
value = self.get_token_value(self.current())
if value == '=' or value == '==>':
self.skip(1)
exp_parser = ExpParser(self.tokens[self.pos : ])
exp2 = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.AssignExp(exp, exp2)
return exp
class StatParser(ParserBase):
def __init__(self, token_list : list, ExpParser = ExpParser) -> None:
super(StatParser, self).__init__(token_list)
self.pos = 0
self.tokens = token_list
# Function address type:
# We can choose the class `ExpParser` Or `ClassBlockExpParser`
self.ExpParser = ExpParser
def parse(self):
tk = self.current()
kind = self.get_type(tk)
tk_value = self.get_token_value(tk)
if kind == TokenType.KEYWORD:
if tk_value in [kw_print, tr_kw_print]:
return self.parse_print_stat()
elif tk_value in [kw_exit, kw_exit_1, kw_exit_2, tr_kw_exit_1, tr_kw_exit_2]:
return self.parse_exit_stat()
elif tk_value in [kw_assign, tr_kw_assign]:
return self.parse_assign_stat()
elif tk_value in [kw_if, tr_kw_if]:
return self.parse_if_stat()
elif tk_value in [kw_import, tr_kw_import]:
return self.parse_import_stat()
elif tk_value == kw_global_set:
return self.parse_global_stat()
elif tk_value in [kw_break, tr_kw_break]:
return self.parse_break_stat()
elif tk_value in [kw_while_do, tr_kw_while_do]:
return self.parse_while_stat()
elif tk_value == '|':
self.skip(1)
if self.get_token_value(self.current()) == '|':
prefix_exps = []
skip_step = 0
else:
exp_parser = self.ExpParser(self.tokens[self.pos : ])
prefix_exps = exp_parser.parse_exp_list()
skip_step = exp_parser.pos # we will skip it in parse_for_stat
del exp_parser
self.get_next_token_of('|', skip_step)
if self.get_token_value(self.look_ahead(skip_step)) in [kw_from, tr_kw_from]:
return self.parse_for_stat(prefix_exps, skip_step)
elif self.get_token_value(self.look_ahead(skip_step)) in [kw_call_begin, tr_kw_call_begin]:
return self.parse_func_call_stat(prefix_exps, skip_step)
elif self.get_token_value(self.look_ahead(skip_step)) in [kw_lst_assign, tr_kw_lst_assign]:
return self.parse_list_assign_stat(prefix_exps, skip_step)
elif self.get_token_value(self.look_ahead(skip_step)) in [kw_set_assign, tr_kw_set_assign]:
return self.parse_set_assign_stat(prefix_exps, skip_step)
elif self.get_token_value(self.look_ahead(skip_step)) in [kw_do, tr_kw_do]:
return self.parse_class_method_call_stat(prefix_exps, skip_step)
elif tk_value == kw_function:
return self.parse_func_def_stat()
elif tk_value == '$$':
return self.parse_lambda_def_stat()
elif tk_value in [kw_pass, tr_kw_pass]:
return self.parse_pass_stat()
elif tk_value in [kw_return, tr_kw_return]:
return self.parse_return_stat()
elif tk_value in [kw_del, tr_kw_del]:
return self.parse_del_stat()
elif tk_value in [kw_type, tr_kw_type]:
return self.parse_type_stat()
elif tk_value in [kw_assert, tr_kw_assert]:
return self.parse_assert_stat()
elif tk_value in [kw_try, tr_kw_try]:
return self.parse_try_stat()
elif tk_value in [kw_raise, tr_kw_raise]:
return self.parse_raise_stat()
elif tk_value in [kw_cmd, tr_kw_cmd]:
return self.parse_cmd_stat()
elif tk_value in [kw_class_def, tr_kw_class_def]:
return self.parse_class_def()
elif tk_value in [kw_call, tr_kw_call]:
return self.parse_call_stat()
elif tk_value in [kw_stackinit, tr_kw_stackinit]:
return self.parse_stack_init_stat()
elif tk_value in [kw_push, tr_kw_push]:
return self.parse_stack_push_stat()
elif tk_value in [kw_pop, tr_kw_pop]:
return self.parse_stack_pop_stat()
elif tk_value in [kw_match, tr_kw_match]:
return self.parse_match_stat()
elif tk_value == '@@@':
return self.parse_extend_stat()
elif tk_value == '&&':
return self.parse_for_each_stat()
elif tk_value in [kw_model, tr_kw_model]:
return self.parse_model_new_stat()
elif tk_value in [kw_turtle_beg, tr_kw_turtle_beg]:
return self.parse_turtle_stat()
elif kind == TokenType.IDENTIFIER:
if self.get_token_value(self.look_ahead(1)) in [kw_from, tr_kw_from]:
return self.parse_for_stat()
elif self.get_token_value(self.look_ahead(1)) in [kw_call_begin, tr_kw_call_begin]:
return self.parse_func_call_stat()
elif self.get_token_value(self.look_ahead(1)) in [kw_do, tr_kw_do]:
return self.parse_class_method_call_stat()
elif kind == TokenType.EOF:
return
def parse_stats(self):
stats = []
while True:
stat = self.parse()
if stat is not None:
stats.append(stat)
else:
break
return stats
def parse_print_stat(self):
self.skip(1) # skip the kw_print
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args = exp_parser.parse_args()
self.skip(exp_parser.pos)
self.get_next_token_of([kw_endprint, tr_kw_endprint], step = 0)
del exp_parser # free the memory
return can_ast.PrintStat(args)
# Parser for multi-assign
def parse_assign_block(self):
# Nothing in assignment block
if self.get_type(self.current()) == TokenType.SEP_RCURLY:
self.skip(1)
return can_ast.AssignStat(0, can_ast.AST, can_ast.AST)
var_list : list = []
exp_list : list= []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
var_list.append(self.parse_var_list()[0])
self.get_next_token_of([kw_is, kw_is_2, kw_is_3], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp_list.append(exp_parser.parse_exp_list()[0])
self.skip(exp_parser.pos)
del exp_parser # free the memory
# Skip the SEP_RCURLY
self.skip(1)
return can_ast.AssignBlockStat(self.get_line(), var_list, exp_list)
def parse_assign_stat(self):
self.skip(1)
if self.get_token_value(self.current()) != kw_do:
var_list = self.parse_var_list()
self.get_next_token_of([kw_is, kw_is_2, kw_is_3], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp_list = exp_parser.parse_exp_list()
self.skip(exp_parser.pos)
del exp_parser # free the memory
last_line = self.get_line()
return can_ast.AssignStat(last_line, var_list, exp_list)
else:
# Skip the kw_do
self.skip(1)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
return self.parse_assign_block()
# The SEP_RCURLY will be checked in self.parse_assign_block()
def parse_var_list(self):
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp = exp_parser.parse_prefixexp()
self.skip(exp_parser.pos)
del exp_parser
var_list : list = [self.check_var(exp)]
while self.get_type(self.current()) == TokenType.SEP_COMMA:
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp = exp_parser.parse_prefixexp()
self.skip(exp_parser.pos)
del exp_parser
var_list.append(self.check_var(exp))
return var_list
def check_var(self, exp : can_ast.AST):
if isinstance(exp, can_ast.IdExp) or \
isinstance(exp, can_ast.ObjectAccessExp) or \
isinstance(exp, can_ast.ListAccessExp) or \
isinstance(exp, can_ast.ClassSelfExp):
return exp
else:
raise Exception('Invalid assignment target: expected an identifier, object access, list access, or class-self expression!')
def parse_exit_stat(self):
tk = self.look_ahead()
self.skip(1)
return can_ast.ExitStat()
def parse_if_stat(self):
# Skip the keyword if
self.skip(1)
if_blocks : list = []
elif_exps : list = []
elif_blocks : list = []
else_blocks : list = []
exp_parser = self.ExpParser(self.tokens[self.pos : ])
if_exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
del exp_parser # free the memory
while (self.get_type(self.current()) != TokenType.SEP_RCURLY):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
if_blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser # free the memory
self.skip(1) # Skip the SEP_RCURLY '}'
while self.get_token_value(self.current()) in [kw_elif, tr_kw_elif]:
self.skip(1) # skip and try to get the next token
exp_parser = self.ExpParser(self.tokens[self.pos : ])
elif_exps.append(exp_parser.parse_exp())
self.skip(exp_parser.pos)
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
del exp_parser # free the memory
elif_block : list = []
while (self.get_type(self.current()) != TokenType.SEP_RCURLY):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
elif_block.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser # free the memory
elif_blocks.append(elif_block)
self.skip(1) # Skip the SEP_RCURLY '}'
if self.get_token_value(self.current()) == kw_else_or_not:
self.skip(1) # Skip and try to get the next token
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
while (self.get_type(self.current()) != TokenType.SEP_RCURLY):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
else_blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser # free the memory
self.skip(1) # Skip the SEP_RCURLY '}'
return can_ast.IfStat(if_exps, if_blocks, elif_exps, elif_blocks, else_blocks)
def parse_import_stat(self):
self.skip(1) # Skip the kw_import
exp_parser = self.ExpParser(self.tokens[self.pos : ])
idlist = exp_parser.parse_idlist()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.ImportStat(idlist)
def parse_global_stat(self):
self.skip(1) # Skip the kw_global
exp_parser = self.ExpParser(self.tokens[self.pos : ])
idlist = exp_parser.parse_idlist()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.GlobalStat(idlist)
def parse_break_stat(self):
self.skip(1) # Skip the kw_break
return can_ast.BreakStat()
def parse_while_stat(self):
self.skip(1) # Skip the kw_while_do
blocks : list = []
cond_exps : list = []
while (self.get_token_value(self.current()) != kw_while):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser # free the memory
self.skip(1) # Skip the kw_while
exp_parser = self.ExpParser(self.tokens[self.pos : ])
cond_exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_whi_end, tr_kw_whi_end], 0)
return can_ast.WhileStat(cond_exps, blocks)
def parse_for_stat(self, prefix_exp : ExpParser = None, skip_prefix_exp : int = 0):
blocks : list = []
if prefix_exp == None:
exp_parser = self.ExpParser(self.tokens[self.pos : ])
id = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
else:
id = prefix_exp[0]
self.skip(skip_prefix_exp)
self.get_next_token_of([kw_from, tr_kw_from], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
from_exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_to, tr_kw_to], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
to_exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
while (self.get_token_value(self.current()) not in [kw_endfor, tr_kw_endfor]):
block_parse = StatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parse.parse())
self.skip(block_parse.pos)
del block_parse # free the memory
self.skip(1)
return can_ast.ForStat(id, from_exp, to_exp, blocks)
def parse_func_def_stat(self):
self.skip(1) # Skip the kw_function
if self.get_token_value(self.current()) == '即係':
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args : list = exp_parser.parse_parlist()
args = [] if args == None else args
self.get_next_token_of([kw_do], 0)
else:
name = self.get_token_value(self.get_next_token_of_kind(TokenType.IDENTIFIER, 0))
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args : list = exp_parser.parse_parlist()
args = [] if args == None else args
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_func_begin, tr_kw_func_begin, kw_do], 0)
blocks : list = []
while (self.get_token_value(self.current()) not in [kw_func_end, tr_kw_func_end]):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1)
return can_ast.FunctoinDefStat(can_ast.IdExp(self.get_line(), name), args, blocks)
def parse_func_call_stat(self, prefix_exps : can_ast.AST = None, skip_step : int = 0):
if prefix_exps == None:
func_name = can_ast.IdExp(self.get_line(), self.get_token_value(self.current()))
self.skip(1)
else:
func_name = prefix_exps[0]
self.skip(skip_step)
self.get_next_token_of([kw_call_begin, tr_kw_call_begin], 0)
self.get_next_token_of(kw_do, 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args = exp_parser.parse_args()
self.skip(exp_parser.pos)
del exp_parser
if self.get_token_value(self.current()) == kw_get_value:
self.skip(1)
var_list = self.parse_var_list()
return can_ast.AssignStat(self.get_line(), var_list,
[can_ast.FuncCallExp(func_name, args)])
else:
return can_ast.FuncCallStat(func_name, args)
def parse_class_method_call_stat(self, prefix_exps : can_ast.AST = None, skip_step : int = 0):
if prefix_exps == None:
name_exp = can_ast.IdExp(self.get_line(), self.get_token_value(self.current()))
self.skip(1)
else:
self.skip(skip_step)
name_exp = prefix_exps[0]
self.get_next_token_of(kw_do, 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
method : can_ast.AST = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args : list = exp_parser.parse_args()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.MethodCallStat(name_exp, method, args)
def parse_list_assign_stat(self, prefix_exp : can_ast.AST, skip_step : int):
self.skip(skip_step)
self.get_next_token_of([kw_lst_assign, tr_kw_lst_assign], 0)
self.get_next_token_of(kw_do, 0)
varlist = self.parse_var_list()
return can_ast.AssignStat(self.get_line(), varlist,
[can_ast.ListExp(prefix_exp)])
def parse_set_assign_stat(self, prefix_exp : can_ast.AST, skip_step : int):
self.skip(skip_step)
self.get_next_token_of([kw_set_assign, tr_kw_set_assign], 0)
self.get_next_token_of(kw_do, 0)
varlist = self.parse_var_list()
return can_ast.AssignStat(self.get_line(), varlist,
[can_ast.MapExp(prefix_exp)])
def parse_pass_stat(self):
self.skip(1)
return can_ast.PassStat()
def parse_assert_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser
return can_ast.AssertStat(exp)
def parse_return_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp_list()
self.skip(exp_parser.pos)
del exp_parser
return can_ast.ReturnStat(exps)
def parse_del_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp_list()
self.skip(exp_parser.pos)
del exp_parser
return can_ast.DelStat(exps)
def parse_try_stat(self):
self.skip(1)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
try_blocks : list = []
except_exps : list = []
except_blocks : list = []
finally_blocks : list = []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
try_blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1)
self.get_next_token_of([kw_except, tr_kw_except], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
except_exps.append(exp_parser.parse_exp())
self.skip(exp_parser.pos)
del exp_parser
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of([kw_do], 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
# a temp list to save the block
except_block = []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
except_block.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1)
except_blocks.append(except_block)
while self.get_token_value(self.current()) in [kw_except, tr_kw_except]:
self.skip(1) # skip the kw_except
exp_parser = self.ExpParser(self.tokens[self.pos : ])
except_exps.append(exp_parser.parse_exp())
self.skip(exp_parser.pos)
del exp_parser
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
# clear the list for the next except block
except_block = []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
except_block.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1) # Skip the SEP_RCURLY '}'
except_blocks.append(except_block)
if self.get_token_value(self.current()) in [kw_finally, tr_kw_finally]:
self.skip(1)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
finally_blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1)
return can_ast.TryStat(try_blocks, except_exps, except_blocks, finally_blocks)
def parse_raise_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
name_exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_raise_end, tr_kw_raise_end], 0)
return can_ast.RaiseStat(name_exp)
def parse_type_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
name_exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.TypeStat(name_exp)
def parse_cmd_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args = exp_parser.parse_args()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.CmdStat(args)
def parse_class_def(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
class_name : can_ast.AST = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_extend, tr_kw_extend], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
extend_name : list = exp_parser.parse_exp_list()
self.skip(exp_parser.pos)
del exp_parser # free the memory
class_blocks = []
while self.get_token_value(self.current()) not in [kw_endclass, tr_kw_endclass]:
class_block_parser = ClassBlockStatParser(self.tokens[self.pos : ])
class_blocks.append(class_block_parser.parse())
self.skip(class_block_parser.pos)
self.skip(1)
return can_ast.ClassDefStat(class_name, extend_name, class_blocks)
def parse_call_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.CallStat(exps)
def parse_stack_init_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.AssignStat(self.get_line(), [exps], [
can_ast.FuncCallExp(can_ast.IdExp(self.get_line(), 'stack'), [])
])
def parse_stack_push_stat(self):
self.skip(1) # skip the kw_push
self.get_next_token_of(kw_do, 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args = exp_parser.parse_args()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.MethodCallStat(exps, can_ast.IdExp(self.get_line(), 'push'),
args)
def parse_stack_pop_stat(self):
self.skip(1) # skip the kw_pop
self.get_next_token_of(kw_do, 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exps = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.MethodCallStat(exps, can_ast.IdExp(self.get_line(), 'pop'),
[])
def parse_lambda_def_stat(self):
exp_parse = self.ExpParser(self.tokens[self.pos : ])
lambda_exp = [exp_parse.parse_functiondef_expr()]
self.skip(exp_parse.pos)
del exp_parse # free the memory
self.get_next_token_of(kw_get_value, 0)
exp_parse = self.ExpParser(self.tokens[self.pos : ])
id_exp = exp_parse.parse_idlist()
self.skip(exp_parse.pos)
del exp_parse # free the memory
return can_ast.AssignStat(self.get_line(), id_exp, lambda_exp)
def parse_match_stat(self):
self.skip(1)
match_val : list = []
match_block : list = []
default_match_block : list = []
exp_parser = self.ExpParser(self.tokens[self.pos : ])
match_id = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
while self.get_token_value(self.current()) in [kw_case, tr_kw_case]:
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
match_val.append(exp_parser.parse_exp())
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
block : list = []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
stat_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
block.append(stat_parser.parse())
self.skip(stat_parser.pos)
del stat_parser # free the memory
self.skip(1)
match_block.append(block)
if self.get_token_value(self.current()) == kw_else_or_not:
self.skip(1)
self.get_next_token_of([kw_then, tr_kw_then], 0)
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
stat_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
default_match_block.append(stat_parser.parse())
self.skip(stat_parser.pos)
del stat_parser # free the memory
self.skip(1)
self.skip(1)
return can_ast.MatchStat(match_id, match_val, match_block, default_match_block)
def parse_for_each_stat(self):
self.skip(1)
id_list : list = []
exp_list : list = []
blocks : list = []
exp_parser = self.ExpParser(self.tokens[self.pos : ])
id_list = exp_parser.parse_idlist()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_in, tr_kw_in], 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp_list = exp_parser.parse_exp_list()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
while (self.get_type(self.current()) != TokenType.SEP_RCURLY):
stat_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(stat_parser.parse())
self.skip(stat_parser.pos)
del stat_parser # free the memory
self.skip(1)
return can_ast.ForEachStat(id_list, exp_list, blocks)
def parse_extend_stat(self):
self.skip(1)
tk = self.get_next_token_of_kind(TokenType.EXTEND_EXPR, 0)
return can_ast.ExtendStat(self.get_token_value(tk)[1 : -1])
def parse_model_new_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
model_name = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_mod_new, tr_kw_mod_new], 0)
self.get_next_token_of(kw_do, 0)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
dataset = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
return can_ast.ModelNewStat(model_name, dataset)
def parse_turtle_stat(self):
self.skip(1)
instruction_ident : list = ["首先", "跟住", "最尾"]
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
exp_blocks : list = []
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
if self.get_token_value(self.current()) in instruction_ident and \
self.get_type(self.current()) == TokenType.IDENTIFIER:
self.skip(1)
else:
exp_parser = self.ExpParser(self.tokens[self.pos : ])
exp_blocks.append(exp_parser.parse_exp())
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.skip(1)
return can_ast.TurtleStat(exp_blocks)
class LambdaBlockExpParser(ExpParser):
def __init__(self, token_list: list) -> None:
super().__init__(token_list)
def parse_exp0(self):
tk = self.get_token_value(self.current())
# no lambda-specific token handling implemented; fall back to the generic expression parser
return super().parse_exp0()
class ClassBlockExpParser(ExpParser):
def __init__(self, token_list: list) -> None:
super().__init__(token_list)
def parse_exp0(self):
tk = self.get_token_value(self.current())
if tk in [kw_self, tr_kw_self, '@@']:
self.skip(1)
return can_ast.ClassSelfExp(super().parse_exp0())
else:
return super().parse_exp0()
class ClassBlockStatParser(StatParser):
def __init__(self, token_list: list, ExpParser = ClassBlockExpParser) -> None:
super().__init__(token_list, ExpParser)
def parse_method_block(self):
self.skip(1) # Skip the kw_method
exp_parser = self.ExpParser(self.tokens[self.pos : ])
name_exp = exp_parser.parse_exp()
self.skip(exp_parser.pos)
del exp_parser # free the memory
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args : list = exp_parser.parse_parlist()
args = [] if args == None else args
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of([kw_func_begin, tr_kw_func_begin, kw_do], 0)
blocks : list = []
# '{' ... '}'
if self.get_type(self.current()) == TokenType.SEP_LCURLY:
self.skip(1)
while self.get_type(self.current()) != TokenType.SEP_RCURLY:
block_parser = ClassBlockStatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
# '=> ... '%%'
else:
while (self.get_token_value(self.current()) not in [kw_func_end, tr_kw_func_end]):
block_parser = ClassBlockStatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser
self.skip(1)
return can_ast.MethodDefStat(name_exp, args, blocks)
def parse_class_init_stat(self):
self.skip(1)
exp_parser = self.ExpParser(self.tokens[self.pos : ])
args = exp_parser.parse_parlist()
self.skip(exp_parser.pos)
del exp_parser # free the memory
self.get_next_token_of(kw_do, 0)
self.get_next_token_of_kind(TokenType.SEP_LCURLY, 0)
blocks : list = []
while (self.get_type(self.current()) != TokenType.SEP_RCURLY):
block_parser = StatParser(self.tokens[self.pos : ], self.ExpParser)
blocks.append(block_parser.parse())
self.skip(block_parser.pos)
del block_parser # free the memory
self.skip(1)
return can_ast.MethodDefStat(can_ast.IdExp(self.get_line(), '__init__'), args, blocks)
def parse_class_assign_stat(self):
return self.parse_assign_stat()
def parse(self):
tk = self.current()
kind = self.get_type(tk)
tk_value = self.get_token_value(tk)
if tk_value in [kw_method, tr_kw_method]:
return self.parse_method_block()
elif tk_value in [kw_class_assign, tr_kw_class_assign]:
return self.parse_class_assign_stat()
elif tk_value in [kw_class_init, tr_kw_class_init]:
return self.parse_class_init_stat()
else:
return super().parse() | PypiClean |
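# Minimal usage sketch for this parser module. Assumptions (not shown in this
# file): the token list comes from the package's own lexer, and each token
# stores its source line at index 0, as current()/get_line() expect.
#
# tokens = tokenize(source_code) # hypothetical lexer call producing the token list
# statements = StatParser(tokens).parse_stats() # list of can_ast statement nodes
# class_body = ClassBlockStatParser(tokens).parse_stats() # same idea inside a class body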
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/autocomplete_object_group.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
class AutocompleteObjectGroup(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
'name': (str,), # noqa: E501
'title': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
'title': 'title', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, name, title, *args, **kwargs): # noqa: E501
"""AutocompleteObjectGroup - a model defined in OpenAPI
Args:
id (str):
name (str): Title of the object group found by an auto-complete search.
title (str): Title of the object group found by an auto-complete search.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.name = name
self.title = title
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, name, title, *args, **kwargs): # noqa: E501
"""AutocompleteObjectGroup - a model defined in OpenAPI
Args:
id (str):
name (str): Title of the object group found by an auto-complete search.
title (str): Title of the object group found by an auto-complete search.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.name = name
self.title = title
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/Benker-0.5.4.tar.gz/Benker-0.5.4/benker/box.py | u"""
Box
===
A *Box* is a rectangular area defined by two coordinates:
- the top-left corner of the rectangle: the *min* coord,
- the bottom-right corner of the rectangle: the *max* coord.
.. doctest:: box_demo
:hide:
>>> from benker.box import Box
>>> from benker.coord import Coord
To instantiate a :class:`~benker.box.Box`, you can do:
.. doctest:: box_demo
>>> b1 = Box(Coord(5, 6), Coord(7, 8))
>>> b2 = Box(Coord(5, 6))
>>> b3 = Box(1, 2, 2, 3)
>>> b4 = Box(1, 2)
>>> b5 = Box(b1)
*Box* objects have a string representation à la Excel:
.. doctest:: box_demo
>>> for box in b1, b2, b3, b4, b5:
... print(box)
E6:G8
E6
A2:B3
A2
E6:G8
You can calculate the *width* and *height* of boxes:
.. doctest:: box_demo
>>> b1 = Box(Coord(5, 6), Coord(6, 8))
>>> b1.width, b1.height
(2, 3)
>>> b2 = Box(Coord(5, 6))
>>> b2.width, b2.height
(1, 1)
You can determine if a *Coord* is included in a *Box*:
.. doctest:: box_demo
>>> top_left = Coord(5, 6)
>>> top_right = Coord(6, 6)
>>> bottom_left = Coord(5, 8)
>>> bottom_right = Coord(6, 8)
>>> b1 = Box(top_left, bottom_right)
>>> top_left in b1
True
>>> top_right in b1
True
>>> bottom_left in b1
True
>>> bottom_right in b1
True
>>> Coord(7, 6) in b1
False
>>> (5, 7) in b1
True
You can determine if two boxes intersect each other, or are disjoints:
.. doctest:: box_demo
>>> b1 = Box(Coord(5, 6), Coord(6, 8))
>>> b2 = Box(Coord(6, 6), Coord(6, 7))
>>> b3 = Box(Coord(7, 6), Coord(7, 8))
>>> b2.intersect(b3)
False
>>> b1.isdisjoint(b2)
False
>>> b2.isdisjoint(b1)
False
>>> b1.isdisjoint(b3)
True
>>> b3.isdisjoint(b1)
True
"""
import collections
import functools
from benker.coord import Coord
from benker.size import Size
BoxTuple = collections.namedtuple('BoxTuple', ['min', 'max'])
@functools.total_ordering
class Box(BoxTuple):
"""
A *Box* is a rectangular area defined by two coordinates:
- the top-left corner of the rectangle: the *min* coord,
- the bottom-right corner of the rectangle: the *max* coord.
Usage:
.. doctest:: box_demo
>>> from benker.box import Box
>>> box = Box(1, 1, 5, 3)
>>> box
Box(min=Coord(x=1, y=1), max=Coord(x=5, y=3))
"""
__slots__ = ()
def __new__(cls, *args):
"""
Construct a new *Box*.
:param args:
Arguments could be:
- top-left and bottom-right coordinates of the box: Coord(*min_coord*, *max_coord*);
- top-left coordinates and box size: Coord(*min_coord*, *max_coord*), Size(*width*, *height*);
- top-left coordinates of the box: Coord(*min_coord*, *max_coord*),
assuming box size is (1, 1),
- coordinates of the box: *min_x*, *min_y*, *max_x*, *max_y*;
- coordinates of the top-left coord: *min_x*, *min_y*,
assuming box size is (1, 1);
- another box.
:return: The new *Box*.
:raises TypeError:
if the arguments are of incompatible types.
:raises ValueError:
if the coordinates are invalid (all components must be positive and *min* must not exceed *max*).
"""
types = tuple(map(type, args))
if types == (Coord, Coord):
min_x, min_y = args[0]
max_x, max_y = args[1]
elif types == (Coord, Size):
min_x, min_y = args[0]
max_x, max_y = args[0] + args[1] - 1
elif types == (Coord,):
min_x, min_y = args[0]
max_x, max_y = min_x, min_y
elif types == (int, int, int, int):
min_x, min_y, max_x, max_y = args
elif types == (int, int):
min_x, min_y = args
max_x, max_y = min_x, min_y
elif types == (cls,):
# another Box was passed in: return it unchanged (Box is immutable, so no copy is needed)
return args[0]
else:
raise TypeError(repr(types))
if 0 < min_x <= max_x and 0 < min_y <= max_y:
min_coord = Coord(min_x, min_y)
max_coord = Coord(max_x, max_y)
# noinspection PyArgumentList
return super(Box, cls).__new__(cls, min_coord, max_coord)
raise ValueError(*args)
def __str__(self):
if (self.width, self.height) == (1, 1):
return str(self.min)
return str(self.min) + ':' + str(self.max)
def __repr__(self):
return super(Box, self).__repr__().replace('BoxTuple', 'Box')
@property
def width(self):
# type: () -> int
return self.max.x - self.min.x + 1
@property
def height(self):
# type: () -> int
return self.max.y - self.min.y + 1
@property
def size(self):
return Size(self.width, self.height)
def transform(self, coord=None, size=None):
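"""
Return a new *Box* with its top-left corner at *coord* and the given *size*.
An argument left as ``None`` keeps the current top-left corner or the current size.
"""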
min_coord = self.min if coord is None else Coord.from_value(coord)
size = self.size if size is None else Size.from_value(size)
max_coord = min_coord + size - 1
return Box(min_coord, max_coord)
def move_to(self, coord):
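"""
Return a copy of this box moved so that its top-left corner is at *coord* (size unchanged).
"""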
return self.transform(coord=coord)
def resize(self, size):
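"""
Return a copy of this box with the same top-left corner and the given *size*.
"""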
return self.transform(size=size)
def __contains__(self, coord):
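"""
Return ``True`` if *coord* (a *Coord*, an ``(x, y)`` tuple of ints, or another *Box*)
lies entirely inside this box.
"""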
coord_type = type(coord)
if coord_type is Coord:
return self.min.x <= coord.x <= self.max.x and self.min.y <= coord.y <= self.max.y
elif coord_type is tuple and tuple(map(type, coord)) == (int, int):
return self.min.x <= coord[0] <= self.max.x and self.min.y <= coord[1] <= self.max.y
elif coord_type is Box:
return coord.min in self and coord.max in self
raise TypeError(repr(coord_type))
def intersect(self, that):
# type: (Box) -> bool
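"""
Return ``True`` if this box and *that* overlap, that is, if either box
contains a corner of the other one.
"""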
return ((self.min in that or self.max in that) or
(that.min in self or that.max in self))
def isdisjoint(self, that):
# type: (Box) -> bool
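"""
Return ``True`` if this box and *that* do not intersect.
"""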
return not self.intersect(that)
def union(self, *others):
"""
Return the union of *self* and all the *boxes*.
Usage:
.. doctest:: box_demo
>>> from benker.box import Box
>>> from benker.coord import Coord
>>> b1 = Box(Coord(3, 2), Coord(6, 4))
>>> b2 = Box(Coord(4, 3), Coord(5, 7))
>>> b1.union(b2)
Box(min=Coord(x=3, y=2), max=Coord(x=6, y=7))
>>> b1 | b2
Box(min=Coord(x=3, y=2), max=Coord(x=6, y=7))
:param others: collections of boxes
:return: The bounding box of all the boxes.
"""
boxes = (self,) + others
min_list = tuple(box.min for box in boxes)
min_coord = Coord(min(coord.x for coord in min_list), min(coord.y for coord in min_list))
max_list = tuple(box.max for box in boxes)
max_coord = Coord(max(coord.x for coord in max_list), max(coord.y for coord in max_list))
bounding_box = Box(min_coord, max_coord)
assert all(box in bounding_box for box in boxes)
return bounding_box
__or__ = union
def intersection(self, *others):
"""
Return the intersection of *self* and all the *boxes*.
Usage:
.. doctest:: box_demo
>>> from benker.box import Box
>>> from benker.coord import Coord
>>> b1 = Box(Coord(3, 2), Coord(6, 4))
>>> b2 = Box(Coord(4, 3), Coord(5, 7))
>>> b1.intersection(b2)
Box(min=Coord(x=4, y=3), max=Coord(x=5, y=4))
>>> b1 & b2
Box(min=Coord(x=4, y=3), max=Coord(x=5, y=4))
:param others: collections of boxes
:return: The inner box of all the boxes.
:raises ValueError: if the two boxes are disjoint.
"""
boxes = (self,) + others
min_list = tuple(box.min for box in boxes)
min_coord = Coord(max(coord.x for coord in min_list), max(coord.y for coord in min_list))
max_list = tuple(box.max for box in boxes)
max_coord = Coord(min(coord.x for coord in max_list), min(coord.y for coord in max_list))
try:
return Box(min_coord, max_coord)
except ValueError:
# the two boxes are disjoint
raise ValueError(boxes)
__and__ = intersection
# total ordering based on coordinates (*y* first, then *x*).
# This ordering can be used to sort boxes by rows and columns.
def __lt__(self, other):
"""
Compare two boxes.
Usage:
.. doctest:: box_demo
>>> from benker.box import Box
>>> b1 = Box(Coord(3, 2), Coord(6, 4))
>>> b1 < b1
False
>>> b1 < Box(Coord(3, 2), Coord(6, 5))
True
>>> b1 < Box(Coord(3, 2), Coord(7, 4))
True
>>> b1 < Box(Coord(4, 2), Coord(6, 4))
True
>>> b1 < Box(Coord(3, 3), Coord(6, 4))
True
:param other: other box
:return: ``True`` if *self* < *other*
"""
if isinstance(other, Box):
if self.min.y == other.min.y:
if self.min.x == other.min.x:
if self.max.y == other.max.y:
return self.max.x < other.max.x
else:
return self.max.y < other.max.y
else:
return self.min.x < other.min.x
else:
return self.min.y < other.min.y
return NotImplemented | PypiClean |
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/tl/custom/adminlogevent.py | from ...tl import types
from ...utils import get_input_peer
class AdminLogEvent:
"""
Represents a more friendly interface for admin log events.
Members:
original (:tl:`ChannelAdminLogEvent`):
The original :tl:`ChannelAdminLogEvent`.
entities (`dict`):
A dictionary mapping user IDs to :tl:`User`.
When `old` and `new` are :tl:`ChannelParticipant`, you can
use this dictionary to map the ``user_id``, ``kicked_by``,
``inviter_id`` and ``promoted_by`` IDs to their :tl:`User`.
user (:tl:`User`):
The user that caused this action (``entities[original.user_id]``).
input_user (:tl:`InputPeerUser`):
Input variant of `user`.
"""
def __init__(self, original, entities):
self.original = original
self.entities = entities
self.user = entities[original.user_id]
self.input_user = get_input_peer(self.user)
@property
def id(self):
"""
The ID of this event.
"""
return self.original.id
@property
def date(self):
"""
The date when this event occurred.
"""
return self.original.date
@property
def user_id(self):
"""
The ID of the user that triggered this event.
"""
return self.original.user_id
@property
def action(self):
"""
The original :tl:`ChannelAdminLogEventAction`.
"""
return self.original.action
@property
def old(self):
"""
The old value from the event.
"""
ori = self.original.action
if isinstance(ori, (
types.ChannelAdminLogEventActionChangeAbout,
types.ChannelAdminLogEventActionChangeTitle,
types.ChannelAdminLogEventActionChangeUsername,
types.ChannelAdminLogEventActionChangeLocation,
types.ChannelAdminLogEventActionChangeHistoryTTL,
)):
return ori.prev_value
elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
return ori.prev_photo
elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
return ori.prev_stickerset
elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
return ori.prev_message
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantToggleAdmin,
types.ChannelAdminLogEventActionParticipantToggleBan
)):
return ori.prev_participant
elif isinstance(ori, (
types.ChannelAdminLogEventActionToggleInvites,
types.ChannelAdminLogEventActionTogglePreHistoryHidden,
types.ChannelAdminLogEventActionToggleSignatures
)):
return not ori.new_value
elif isinstance(ori, types.ChannelAdminLogEventActionDeleteMessage):
return ori.message
elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
return ori.prev_banned_rights
elif isinstance(ori, types.ChannelAdminLogEventActionDiscardGroupCall):
return ori.call
elif isinstance(ori, (
types.ChannelAdminLogEventActionExportedInviteDelete,
types.ChannelAdminLogEventActionExportedInviteRevoke,
types.ChannelAdminLogEventActionParticipantJoinByInvite,
)):
return ori.invite
elif isinstance(ori, types.ChannelAdminLogEventActionExportedInviteEdit):
return ori.prev_invite
@property
def new(self):
"""
The new value present in the event.
"""
ori = self.original.action
if isinstance(ori, (
types.ChannelAdminLogEventActionChangeAbout,
types.ChannelAdminLogEventActionChangeTitle,
types.ChannelAdminLogEventActionChangeUsername,
types.ChannelAdminLogEventActionToggleInvites,
types.ChannelAdminLogEventActionTogglePreHistoryHidden,
types.ChannelAdminLogEventActionToggleSignatures,
types.ChannelAdminLogEventActionChangeLocation,
types.ChannelAdminLogEventActionChangeHistoryTTL,
)):
return ori.new_value
elif isinstance(ori, types.ChannelAdminLogEventActionChangePhoto):
return ori.new_photo
elif isinstance(ori, types.ChannelAdminLogEventActionChangeStickerSet):
return ori.new_stickerset
elif isinstance(ori, types.ChannelAdminLogEventActionEditMessage):
return ori.new_message
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantToggleAdmin,
types.ChannelAdminLogEventActionParticipantToggleBan
)):
return ori.new_participant
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantInvite,
types.ChannelAdminLogEventActionParticipantVolume,
)):
return ori.participant
elif isinstance(ori, types.ChannelAdminLogEventActionDefaultBannedRights):
return ori.new_banned_rights
elif isinstance(ori, types.ChannelAdminLogEventActionStopPoll):
return ori.message
elif isinstance(ori, types.ChannelAdminLogEventActionStartGroupCall):
return ori.call
elif isinstance(ori, (
types.ChannelAdminLogEventActionParticipantMute,
types.ChannelAdminLogEventActionParticipantUnmute,
)):
return ori.participant
elif isinstance(ori, types.ChannelAdminLogEventActionToggleGroupCallSetting):
return ori.join_muted
elif isinstance(ori, types.ChannelAdminLogEventActionExportedInviteEdit):
return ori.new_invite
@property
def changed_about(self):
"""
Whether the channel's about was changed or not.
If `True`, `old` and `new` will be present as `str`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeAbout)
@property
def changed_title(self):
"""
Whether the channel's title was changed or not.
If `True`, `old` and `new` will be present as `str`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeTitle)
@property
def changed_username(self):
"""
Whether the channel's username was changed or not.
If `True`, `old` and `new` will be present as `str`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeUsername)
@property
def changed_photo(self):
"""
Whether the channel's photo was changed or not.
If `True`, `old` and `new` will be present as :tl:`Photo`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangePhoto)
@property
def changed_sticker_set(self):
"""
Whether the channel's sticker set was changed or not.
If `True`, `old` and `new` will be present as :tl:`InputStickerSet`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeStickerSet)
@property
def changed_message(self):
"""
Whether a message in this channel was edited or not.
If `True`, `old` and `new` will be present as
`Message <telethon.tl.custom.message.Message>`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionEditMessage)
@property
def deleted_message(self):
"""
Whether a message in this channel was deleted or not.
If `True`, `old` will be present as
`Message <telethon.tl.custom.message.Message>`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionDeleteMessage)
@property
def changed_admin(self):
"""
Whether the permissions for an admin in this channel
changed or not.
If `True`, `old` and `new` will be present as
:tl:`ChannelParticipant`.
"""
return isinstance(
self.original.action,
types.ChannelAdminLogEventActionParticipantToggleAdmin)
@property
def changed_restrictions(self):
"""
Whether the restrictions (banned rights) of a participant in this channel were changed or not.
If `True`, `old` and `new` will be present as
:tl:`ChannelParticipant`.
"""
return isinstance(
self.original.action,
types.ChannelAdminLogEventActionParticipantToggleBan)
@property
def changed_invites(self):
"""
Whether the invites in the channel were toggled or not.
If `True`, `old` and `new` will be present as `bool`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionToggleInvites)
@property
def changed_location(self):
"""
Whether the location setting of the channel has changed or not.
If `True`, `old` and `new` will be present as :tl:`ChannelLocation`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeLocation)
@property
def joined(self):
"""
Whether `user` joined through the channel's
public username or not.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantJoin)
@property
def joined_invite(self):
"""
Whether a new user joined through an invite
link to the channel or not.
If `True`, `new` will be present as
:tl:`ChannelParticipant`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantInvite)
@property
def left(self):
"""
Whether `user` left the channel or not.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantLeave)
@property
def changed_hide_history(self):
"""
Whether hiding the previous message history for new members
in the channel was toggled or not.
If `True`, `old` and `new` will be present as `bool`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionTogglePreHistoryHidden)
@property
def changed_signatures(self):
"""
Whether the message signatures in the channel were toggled
or not.
If `True`, `old` and `new` will be present as `bool`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionToggleSignatures)
@property
def changed_pin(self):
"""
Whether a new message in this channel was pinned or not.
If `True`, `new` will be present as
`Message <telethon.tl.custom.message.Message>`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionUpdatePinned)
@property
def changed_default_banned_rights(self):
"""
Whether the default banned rights were changed or not.
If `True`, `old` and `new` will
be present as :tl:`ChatBannedRights`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionDefaultBannedRights)
@property
def stopped_poll(self):
"""
Whether a poll was stopped or not.
If `True`, `new` will be present as
`Message <telethon.tl.custom.message.Message>`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionStopPoll)
@property
def started_group_call(self):
"""
Whether a group call was started or not.
If `True`, `new` will be present as :tl:`InputGroupCall`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionStartGroupCall)
@property
def discarded_group_call(self):
"""
Whether a group call was discarded (ended) or not.
If `True`, `old` will be present as :tl:`InputGroupCall`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionDiscardGroupCall)
@property
def user_muted(self):
"""
Whether a participant was muted in the ongoing group call or not.
If `True`, `new` will be present as :tl:`GroupCallParticipant`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantMute)
@property
def user_unmutted(self):
"""
Whether a participant was unmuted from the ongoing group call or not.
If `True`, `new` will be present as :tl:`GroupCallParticipant`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantUnmute)
@property
def changed_call_settings(self):
"""
Whether the group call settings were changed or not.
If `True`, `new` will be `True` if new users are muted on join.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionToggleGroupCallSetting)
@property
def changed_history_ttl(self):
"""
Whether the Time To Live of the message history has changed.
Messages sent after this change will have a ``ttl_period`` in seconds
indicating how long they should live for before being auto-deleted.
If `True`, `old` will be the old TTL, and `new` the new TTL, in seconds.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionChangeHistoryTTL)
@property
def deleted_exported_invite(self):
"""
Whether the exported chat invite has been deleted.
If `True`, `old` will be the deleted :tl:`ExportedChatInvite`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionExportedInviteDelete)
@property
def edited_exported_invite(self):
"""
Whether the exported chat invite has been edited.
If `True`, `old` and `new` will be the old and new
:tl:`ExportedChatInvite`, respectively.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionExportedInviteEdit)
@property
def revoked_exported_invite(self):
"""
Whether the exported chat invite has been revoked.
If `True`, `old` will be the revoked :tl:`ExportedChatInvite`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionExportedInviteRevoke)
@property
def joined_by_invite(self):
"""
Whether a new participant has joined with the use of an invite link.
If `True`, `old` will be the pre-existing (old) :tl:`ExportedChatInvite`
used to join.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantJoinByInvite)
@property
def changed_user_volume(self):
"""
Whether a participant's volume in a call has been changed.
If `True`, `new` will be the updated :tl:`GroupCallParticipant`.
"""
return isinstance(self.original.action,
types.ChannelAdminLogEventActionParticipantVolume)
def __str__(self):
return str(self.original)
def stringify(self):
return self.original.stringify() | PypiClean |
/3ETool-0.8.3.tar.gz/3ETool-0.8.3/README.md | # 3ETool
__3ETool__ contains some useful tools developed by the [SERG research group](https://www.dief.unifi.it/vp-177-serg-group-english-version.html)
of the [University of Florence](https://www.unifi.it/changelang-eng.html) for performing exergo-economic and exergo environmental analysis. The __user manual__ can be downloaded [here](https://firebasestorage.googleapis.com/v0/b/etapp-serggroup.appspot.com/o/3ETool_res%2FOther%2FUser%20Guide-eng.pdf?alt=media&token=db51ff1e-4c63-48b9-8b42-322a2eee44da). Moreover, some [youtube tutorials](https://www.youtube.com/playlist?list=PLj6A7PjCJLfa9xNOFwRc3D_XroWhKlptj) have been uploaded in order to help the user in compiling the excel file.
### 1 - Calculation process 🤔⚙
The beta version can be downloaded using __PIP__:
```
pip install 3ETool
```
Once the installation has been completed, the user can import the tool and paste the __user manual__, the __components documentation__, or the __default Excel file__ to a desired location, as in the _MATLAB version_ of the app.
```python
import EEETools
EEETools.paste_user_manual()
EEETools.paste_components_documentation()
EEETools.paste_default_excel_file()
```
Finally, once the Excel file has been compiled, the calculation can be started through this command:
```python
import EEETools
EEETools.calculate()
```
Calculation options and a user-defined Excel path can be passed to the function as well (the default values are _True_); if the user does not pass a path, the app will automatically open a file dialog window so that the file can be selected manually.
```python
import EEETools
EEETools.calculate(
excel_path="your_excel_file.xlsx",
calculate_on_pf_diagram = True,
loss_cost_is_zero = True,
valve_is_dissipative = True,
condenser_is_dissipative = True
)
```
### 2 - Debugging tools 👨💻🔍
The Excel file can be debugged using some specific tools that can be launched with the following command (select the Excel file that you want to debug when the program asks for it):
```python
import EEETools
EEETools.launch_connection_debug()
```
Another way of debugging is to ask the program to export the debug information to the Excel file:
```python
import EEETools
EEETools.export_debug_information()
```
Finally, the plant topology can be displayed using:
```python
import EEETools
EEETools.launch_network_display()
```
### 3 - Sankey Diagrams 📈📊
Sankey diagram can be plotted using the following command:
```python
import EEETools
EEETools.plot_sankey(
generate_on_pf_diagram=True,
display_costs=True,
)
```
* ___<span style="color:#3399ff">generate_on_pf_diagram</span>___ can be omitted and
__is <span style="color:#3399ff">True</span> by default__:
- if __False__ the connections are defined according to the __physical topology__ of the plant
- if __True__ the connections are based on the __product-fuel__ definition
* ___<span style="color:#3399ff">display_costs</span>___ can be omitted and
__is <span style="color:#3399ff">False</span> by default__:
- if __False__ the thickness of the connection in the sankey diagram is proportional to the __exergy flux__ between
the components (in _kW_)
  - if __True__ the thickness of the connection in the Sankey diagram is proportional to the
    __economic (or environmental) flux__ between the components (in _€/s_ or in _Pts/s_). In addition, for each
    connection, the __color intensity__ is proportional to the __relative cost of the stream__ (in _€/kJ_ or in _Pts/kJ_)
### 4 - Code Structure 📁
__The application code is divided into 3 main folders:__<br/><br/>
The __MainModules__ directory contains base modules such as the _Block_, _Connection_, _ArrayHandler_ and _Drawer_ classes.<br/>
__Block Subclasses__ contains a Block subclass for each component type (e.g. expander, compressor, etc.)<br/>
__Tools__ contains the different APIs needed for the program to run (e.g. the cost correlation handler,
the EES code generator, and the importer and exporter for both Excel and XML files)
### 5 - Important Information ⚠
__-------------------------- !!! THIS IS A BETA VERSION !!! --------------------------__
_Please report any bugs or problems with the installation to [email protected]_<br/>
For further information visit: https://tinyurl.com/SERG-3ETool
__-------------------------------- !!! HOW TO CITE !!! --------------------------------__
The following reference can be used to cite the tool in publications:
Fiaschi, D., Manfrida, G., Ungar, P., Talluri, L.
Development of an exergo-economic and exergo-environmental tool for power plant assessment:
evaluation of a geothermal case study.
https://doi.org/10.52202/062738-0003
| PypiClean |
/Flask-PagedList-0.2.1.zip/Flask-PagedList-0.2.1/README.rst | Flask-PagedList
===============
Flask-PagedList bundles features from pypagedlist into a blueprint named 'PagedList'.
Installation
------------
Flask-PagedList can be installed using ``pip`` from `PyPI`_. `virtualenv`_ is highly
recommended:
.. code-block:: bash
pip install -U flask-pagedlist
.. _PyPI: https://pypi.python.org/pypi/Flask-PagedList
.. _virtualenv: https://virtualenv.pypa.io/en/latest/
For development, instead, clone the `github repository <https://github.com/timonwong/flask-pagedlist>`_, and use:
.. code-block:: bash
python setup.py develop # Or, pip install -e .
Example Project
----------------
Screenshots
~~~~~~~~~~~
Traditional
+++++++++++
.. image:: https://raw.github.com/timonwong/flask-pagedlist/gh-pages/screenshots/demo1.png
AJAX
++++
.. image:: https://raw.github.com/timonwong/flask-pagedlist/gh-pages/screenshots/demo2.png
Run
~~~
Here is a brief description of how to run the demo project:
.. code-block:: bash
# 1. Clone this git repo in order to get the example
git clone https://github.com/timonwong/flask-pagedlist.git
cd flask-pagedlist
# 2. Install flask-pagedlist
pip install -U flask-pagedlist
# 3. Install dependencies for the example
pip install -U -r example-requirements.txt
# 4. Start the example project
python run_example.py
Usage
-----
Basic usage
~~~~~~~~~~~
Here is an example:
.. code-block:: python
from flask_pagedlist import PagedList
PagedList(app)
Static resources
~~~~~~~~~~~~~~~~
``pagedlist_static_for`` is recommended for referencing static resources for Flask-PagedList in templates:
.. code-block:: python
def pagedlist_static_for(filename, use_minified=None):
"""Resource finding function, also available in templates.
:param filename: File to find a URL for.
        :param use_minified: If set to ``True``/``False``, use/don't use
                             minified. If ``None``, use the default setting
                             from ``PAGEDLIST_USE_MINIFIED``.
:return: A URL.
"""
Configuration
~~~~~~~~~~~~~
``PAGEDLIST_USE_MINIFIED``
++++++++++++++++++++++++++
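Judging from the ``pagedlist_static_for`` docstring above, this setting controls whether minified static resources are used by default when ``use_minified`` is left as ``None``.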
``PAGEDLIST_PREFIX``
++++++++++++++++++++
| PypiClean |
/5gasp_cli-0.4.0.tar.gz/5gasp_cli-0.4.0/src/DescriptorParser/parser.py |
import yaml
from typing import List
class ConnectionPointsParser:
"""
    Connection Points Parser Class
"""
validated_connection_points = None
_interfaces = None
def __init__(self, nsd_filepaths: List[str]):
"""
Constructor
"""
self.base_nsd_filepaths = set(nsd_filepaths)
self.validated_connection_points = {}
self._interfaces = []
self.infer_connection_points()
def infer_connection_points(self):
for filepath in self.base_nsd_filepaths:
self.parse_descriptor(filepath)
def parse_descriptor(self, nsd_filepath):
'''
Retrieves all the tags from the given descriptor
'''
try:
connection_points = []
with open(nsd_filepath, "r") as file:
descriptor = yaml.safe_load(file)
for network_service in descriptor['nsd']['nsd']:
ns_id = network_service['id']
for df in network_service['df']:
connection_points += self.infer_connection_points_from_df(
ns_id=ns_id,
df=df,
)
# save connection points
self.validated_connection_points[nsd_filepath] = {
"ns_id": ns_id,
"connection_points": connection_points
}
except Exception as e:
print("\nThe following exception occurred when trying to infer " +
f"connection points for the NSD '{nsd_filepath}': {e}.")
def infer_connection_points_from_df(self, ns_id, df):
connection_points = []
for vnf in df['vnf-profile']:
vnf_id = vnf['id']
for constituent in vnf['virtual-link-connectivity']:
for constituent_cpd in constituent["constituent-cpd-id"]:
interface_id = constituent_cpd['constituent-cpd-id']
connection_points.append(
"{{deployment_info|" + f"{ns_id}|{vnf_id}|" +
f"{interface_id}" + "}}"
)
return connection_points
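    # Illustrative usage sketch (not part of the original module; the NSD file
    # name below is hypothetical):
    #
    #   parser = ConnectionPointsParser(["my_nsd.yaml"])
    #   for path, info in parser.connection_points.items():
    #       print(path, info["ns_id"], info["connection_points"])
    #
    # Each inferred connection point is rendered as a template string of the
    # form "{{deployment_info|<ns_id>|<vnf_id>|<interface_id>}}".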
@property
def connection_points(self):
'''
Get interfaces
'''
return self.validated_connection_points | PypiClean |
/MDP-3.6.tar.gz/MDP-3.6/mdp/graph/graph.py | from builtins import map
from builtins import object
# inspired by some code by Nathan Denny (1999)
# see http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html
try:
# use reduce against BDFL's will even on python > 2.6
from functools import reduce
except ImportError:
pass
class GraphException(Exception):
"""Base class for exception in the graph package."""
pass
class GraphTopologicalException(GraphException):
"""Exception thrown during a topological sort if the graph is cyclical."""
pass
def is_sequence(x):
return isinstance(x, (list, tuple))
def recursive_map(func, seq):
"""Apply a function recursively on a sequence and all subsequences."""
def _func(x):
if is_sequence(x):
return recursive_map(func, x)
else:
return func(x)
return list(map(_func, seq))
def recursive_reduce(func, seq, *argv):
"""Apply reduce(func, seq) recursively to a sequence and all its
subsequences."""
def _func(x, y):
if is_sequence(y):
return func(x, recursive_reduce(func, y))
else:
return func(x, y)
return reduce(_func, seq, *argv)
class GraphNode(object):
"""Represent a graph node and all information attached to it."""
def __init__(self, data=None):
self.data = data
# edges in
self.ein = []
# edges out
self.eout = []
def add_edge_in(self, edge):
self.ein.append(edge)
def add_edge_out(self, edge):
self.eout.append(edge)
def remove_edge_in(self, edge):
self.ein.remove(edge)
def remove_edge_out(self, edge):
self.eout.remove(edge)
def get_edges_in(self, from_ = None):
"""Return a copy of the list of the entering edges. If from_
is specified, return only the nodes coming from that node."""
inedges = self.ein[:]
if from_:
inedges = [edge for edge in inedges if edge.head == from_]
return inedges
def get_edges_out(self, to_ = None):
"""Return a copy of the list of the outgoing edges. If to_
is specified, return only the nodes going to that node."""
outedges = self.eout[:]
if to_:
outedges = [edge for edge in outedges if edge.tail == to_]
return outedges
def get_edges(self, neighbor = None):
"""Return a copy of all edges. If neighbor is specified, return
only the edges connected to that node."""
return ( self.get_edges_in(from_=neighbor) +
self.get_edges_out(to_=neighbor) )
def in_degree(self):
"""Return the number of entering edges."""
return len(self.ein)
def out_degree(self):
"""Return the number of outgoing edges."""
return len(self.eout)
def degree(self):
"""Return the number of edges."""
return self.in_degree()+self.out_degree()
def in_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return [x.get_head() for x in self.ein]
def out_neighbors(self):
"""Return the neighbors down in-edges (i.e. the parents nodes)."""
return [x.get_tail() for x in self.eout]
def neighbors(self):
return self.in_neighbors() + self.out_neighbors()
class GraphEdge(object):
"""Represent a graph edge and all information attached to it."""
def __init__(self, head, tail, data=None):
# head node
self.head = head
# neighbors out
self.tail = tail
# arbitrary data slot
self.data = data
def get_ends(self):
"""Return the tuple (head_id, tail_id)."""
return (self.head, self.tail)
def get_tail(self):
return self.tail
def get_head(self):
return self.head
class Graph(object):
"""Represent a directed graph."""
def __init__(self):
# list of nodes
self.nodes = []
# list of edges
self.edges = []
# node functions
def add_node(self, data=None):
node = GraphNode(data=data)
self.nodes.append(node)
return node
def remove_node(self, node):
# the node is not in this graph
if node not in self.nodes:
errstr = 'This node is not part of the graph (%s)' % node
raise GraphException(errstr)
# remove all edges containing this node
for edge in node.get_edges():
self.remove_edge(edge)
# remove the node
self.nodes.remove(node)
# edge functions
def add_edge(self, head, tail, data=None):
"""Add an edge going from head to tail.
head : head node
tail : tail node
"""
# create edge
edge = GraphEdge(head, tail, data=data)
# add edge to head and tail node
head.add_edge_out(edge)
tail.add_edge_in(edge)
# add to the edges dictionary
self.edges.append(edge)
return edge
def remove_edge(self, edge):
head, tail = edge.get_ends()
# remove from head
head.remove_edge_out(edge)
# remove from tail
tail.remove_edge_in(edge)
# remove the edge
self.edges.remove(edge)
### populate functions
def add_nodes(self, data):
"""Add many nodes at once.
data -- number of nodes to add or sequence of data values, one for
each new node"""
if not is_sequence(data):
data = [None]*data
return list(map(self.add_node, data))
def add_tree(self, tree):
"""Add a tree to the graph.
The tree is specified with a nested list of tuple, in a LISP-like
notation. The values specified in the list become the values of
the single nodes.
Return an equivalent nested list with the nodes instead of the values.
Example:
>>> a=b=c=d=e=None
>>> g.add_tree( (a, b, (c, d ,e)) )
corresponds to this tree structure, with all node values set to None:
a
/ \
b c
/ \
d e
"""
def _add_edge(root, son):
self.add_edge(root, son)
return root
nodes = recursive_map(self.add_node, tree)
recursive_reduce(_add_edge, nodes)
return nodes
def add_full_connectivity(self, from_nodes, to_nodes):
"""Add full connectivity from a group of nodes to another one.
Return a list of lists of edges, one for each node in 'from_nodes'.
Example: create a two-layer graph with full connectivity.
>>> g = Graph()
>>> layer1 = g.add_nodes(10)
>>> layer2 = g.add_nodes(5)
>>> g.add_full_connectivity(layer1, layer2)
"""
edges = []
for from_ in from_nodes:
edges.append([self.add_edge(from_, x) for x in to_nodes])
return edges
###### graph algorithms
def topological_sort(self):
"""Perform a topological sort of the nodes. If the graph has a cycle,
throw a GraphTopologicalException with the list of successfully
ordered nodes."""
# topologically sorted list of the nodes (result)
topological_list = []
# queue (fifo list) of the nodes with in_degree 0
topological_queue = []
# {node: in_degree} for the remaining nodes (those with in_degree>0)
remaining_indegree = {}
# init queues and lists
for node in self.nodes:
indegree = node.in_degree()
if indegree == 0:
topological_queue.append(node)
else:
remaining_indegree[node] = indegree
# remove nodes with in_degree 0 and decrease the in_degree of their sons
while len(topological_queue):
# remove the first node with degree 0
node = topological_queue.pop(0)
topological_list.append(node)
# decrease the in_degree of the sons
for son in node.out_neighbors():
remaining_indegree[son] -= 1
if remaining_indegree[son] == 0:
topological_queue.append(son)
# if not all nodes were covered, the graph must have a cycle
        # raise a GraphTopologicalException
if len(topological_list)!=len(self.nodes):
raise GraphTopologicalException(topological_list)
return topological_list
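    # Example sketch (added for illustration, not in the original module):
    # build a three-node chain and sort it topologically.
    #
    #   g = Graph()
    #   a, b, c = g.add_nodes(3)
    #   g.add_edge(a, b)
    #   g.add_edge(b, c)
    #   assert g.topological_sort() == [a, b, c]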
### Depth-First sort
def _dfs(self, neighbors_fct, root, visit_fct=None):
# core depth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal dfs, reverse dfs, or
# dfs on the equivalent undirected graph, respectively
# result list containing the nodes in Depth-First order
dfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# stack (lifo) list
dfs_stack = []
dfs_stack.append(root)
while len(dfs_stack):
# consider the next node on the stack
node = dfs_stack.pop()
dfs_list.append(node)
# visit the node
if visit_fct is not None:
visit_fct(node)
# add all sons to the stack (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
dfs_stack.append(son)
return dfs_list
def dfs(self, root, visit_fct=None):
"""Return a list of nodes in some Depth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
The returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root.
"""
neighbors_fct = lambda node: node.out_neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_dfs(self, root, visit_fct=None):
"""Perform Depth First sort.
This function is identical to dfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._dfs(neighbors_fct, root, visit_fct=visit_fct)
### Connected components
def connected_components(self):
"""Return a list of lists containing the nodes of all connected
components of the graph."""
visited = {}
def visit_fct(node, visited=visited):
visited[node] = None
components = []
nodes = self.nodes
for node in nodes:
if node in visited:
continue
components.append(self.undirected_dfs(node, visit_fct))
return components
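    # Example sketch (illustrative): with edges a->b and c->d only,
    # connected_components() returns two components, [[a, b], [c, d]]
    # (node order inside each component follows the undirected DFS).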
def is_weakly_connected(self):
"""Return True if the graph is weakly connected."""
return len(self.undirected_dfs(self.nodes[0]))==len(self.nodes)
### Breadth-First Sort
# BFS and DFS could be generalized to one function. I leave them
# distinct for clarity.
def _bfs(self, neighbors_fct, root, visit_fct=None):
# core breadth-first sort function
# changing the neighbors function to return the sons of a node,
# its parents, or both one gets normal bfs, reverse bfs, or
# bfs on the equivalent undirected graph, respectively
# result list containing the nodes in Breadth-First order
bfs_list = []
# keep track of all already visited nodes
visited_nodes = { root: None }
# queue (fifo) list
bfs_queue = []
bfs_queue.append(root)
while len(bfs_queue):
# consider the next node in the queue
node = bfs_queue.pop(0)
bfs_list.append(node)
# visit the node
if visit_fct is not None:
visit_fct(node)
# add all sons to the queue (if not already visited)
for son in neighbors_fct(node):
if son not in visited_nodes:
visited_nodes[son] = None
bfs_queue.append(son)
return bfs_list
def bfs(self, root, visit_fct=None):
"""Return a list of nodes in some Breadth First order starting from
a root node. If defined, visit_fct is applied on each visited node.
Note the returned list does not have to contain all nodes in the
graph, but only the ones reachable from the root."""
neighbors_fct = lambda node: node.out_neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct)
def undirected_bfs(self, root, visit_fct=None):
"""Perform Breadth First sort.
This function is identical to bfs, but the sort is performed on
the equivalent undirected version of the graph."""
neighbors_fct = lambda node: node.neighbors()
return self._bfs(neighbors_fct, root, visit_fct=visit_fct) | PypiClean |
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/addon/colorpicker/color_picker_popup.py |
import sys
import traceback
from colorsys import hsv_to_rgb
from muntjac.ui.window import Window
from muntjac.ui.button import IClickListener, Button
from muntjac.ui.tab_sheet import TabSheet
from muntjac.ui.vertical_layout import VerticalLayout
from muntjac.addon.colorpicker.color import Color
from muntjac.addon.colorpicker.color_picker \
import ICoordinates2Color, IColorChangeListener
from muntjac.addon.colorpicker.color_picker_history import ColorPickerHistory
from muntjac.addon.colorpicker.color_change_event import ColorChangeEvent
from muntjac.addon.colorpicker.color_picker_preview import ColorPickerPreview
from muntjac.addon.colorpicker.color_picker_select import ColorPickerSelect
from muntjac.addon.colorpicker.color_selector import IColorSelector
from muntjac.addon.colorpicker.color_picker_gradient import ColorPickerGradient
from muntjac.ui.horizontal_layout import HorizontalLayout
from muntjac.ui.alignment import Alignment
from muntjac.ui.slider import Slider, ValueOutOfBoundsException
from muntjac.data.property import IValueChangeListener
_COLOR_CHANGE_METHOD = getattr(IColorChangeListener, 'colorChanged')
class ColorPickerPopup(Window, IClickListener, IColorChangeListener,
IColorSelector):
"""The Class ColorPickerPopup.
@author: John Ahlroos / ITMill Oy
@author: Richard Lincoln
"""
_STYLENAME = 'v-colorpicker-popup'
def __init__(self, initialColor):
"""Instantiates a new color picker popup."""
#: The tabs.
self._tabs = TabSheet()
#: The layout.
self._layout = VerticalLayout()
#: The ok button.
self._ok = Button('OK')
#: The cancel button.
self._cancel = Button('Cancel')
#: The resize button.
self._resize = Button('...')
#: The selected color.
self._selectedColor = Color.WHITE
#: The history.
self._history = None
#: The history container.
self._historyContainer = None
#: The rgb gradient.
self._rgbGradient = None
#: The hsv gradient.
self._hsvGradient = None
#: The red slider.
self._redSlider = None
#: The green slider.
self._greenSlider = None
#: The blue slider.
self._blueSlider = None
#: The hue slider.
self._hueSlider = None
#: The saturation slider.
self._saturationSlider = None
#: The value slider.
self._valueSlider = None
#: The preview on the rgb tab.
self._rgbPreview = None
#: The preview on the hsv tab.
self._hsvPreview = None
#: The preview on the swatches tab.
self._selPreview = None
#: The color select.
self._colorSelect = None
#: The selectors.
self._selectors = set()
super(ColorPickerPopup, self).__init__()
self._selectedColor = initialColor
self.setWidth('250px')
self.setScrollable(False)
self.setStyleName(self._STYLENAME)
self.setResizable(False)
self.setImmediate(True)
# Create the history
self._history = ColorPickerHistory()
self._history.addListener(self, IColorChangeListener)
# Create the preview on the rgb tab
self._rgbPreview = ColorPickerPreview(self._selectedColor)
self._rgbPreview.setWidth('220px')
self._rgbPreview.setHeight('20px')
self._rgbPreview.addListener(self, IColorChangeListener)
self._selectors.add(self._rgbPreview)
# Create the preview on the hsv tab
self._hsvPreview = ColorPickerPreview(self._selectedColor)
self._hsvPreview.setWidth('220px')
self._hsvPreview.setHeight('20px')
self._hsvPreview.addListener(self, IColorChangeListener)
self._selectors.add(self._hsvPreview)
# Create the preview on the swatches tab
self._selPreview = ColorPickerPreview(self._selectedColor)
self._selPreview.setWidth('220px')
self._selPreview.setHeight('20px')
self._selPreview.addListener(self, IColorChangeListener)
self._selectors.add(self._selPreview)
# Set the layout
self._layout.setSpacing(False)
self._layout.setSizeFull()
self.setContent(self._layout)
# Create the tabs
self._rgbTab = self.createRGBTab(self._selectedColor)
self._tabs.addTab(self._rgbTab, 'RGB', None)
self._hsvTab = self.createHSVTab(self._selectedColor)
self._tabs.addTab(self._hsvTab, 'HSV', None)
self._swatchesTab = self.createSelectTab()
self._tabs.addTab(self._swatchesTab, 'Swatches', None)
# Add the tabs
self._tabs.setWidth('100%')
self._layout.addComponent(self._tabs)
# Add the history
self._history.setWidth('97%')
self._history.setHeight('27px')
# Create the default colors
defaultColors = list()
defaultColors.append(Color.BLACK)
defaultColors.append(Color.WHITE)
# Create the history
innerContainer = VerticalLayout()
innerContainer.setSizeFull()
innerContainer.addComponent(self._history)
innerContainer.setExpandRatio(self._history, 1)
outerContainer = VerticalLayout()
outerContainer.setWidth('99%')
outerContainer.setHeight('27px')
outerContainer.addComponent(innerContainer)
self._historyContainer = outerContainer
self._layout.addComponent(self._historyContainer)
# Add the resize button for the history
self._resize.addListener(self, IClickListener)
self._resize.setData(False)
self._resize.setWidth('100%')
self._resize.setHeight('10px')
self._resize.setStyleName('resize-button')
self._layout.addComponent(self._resize)
# Add the buttons
self._ok.setWidth('70px')
self._ok.addListener(self, IClickListener)
self._cancel.setWidth('70px')
self._cancel.addListener(self, IClickListener)
buttons = HorizontalLayout()
buttons.addComponent(self._ok)
buttons.addComponent(self._cancel)
buttons.setWidth('100%')
buttons.setHeight('30px')
buttons.setComponentAlignment(self._ok, Alignment.MIDDLE_CENTER)
buttons.setComponentAlignment(self._cancel, Alignment.MIDDLE_CENTER)
self._layout.addComponent(buttons)
self.setHeight(self.calculateHeight())
def calculateHeight(self):
"""Calculates the height of the popup menu
@return: Returns the height in CSS string representation
"""
if self._historyContainer.isVisible():
historyHeight = self._historyContainer.getHeight()
else:
historyHeight = 0
tabsHeight = 0 if self._tabs.areTabsHidden() else 32
contentHeight = 370
buttonsHeight = 30
previewHeight = 20 if self._rgbPreview.isVisible() else 0
return (str(historyHeight + tabsHeight + contentHeight + buttonsHeight
+ previewHeight + 10) + 'px')
def createRGBTab(self, color):
"""Creates the rgb tab.
@return: the component
"""
rgbLayout = VerticalLayout()
rgbLayout.setMargin(False, False, True, False)
rgbLayout.addComponent(self._rgbPreview)
# Add the RGB color gradient
self._rgbGradient = ColorPickerGradient('rgb-gradient', RGBConverter())
self._rgbGradient.setColor(color)
self._rgbGradient.addListener(self, IColorChangeListener)
rgbLayout.addComponent(self._rgbGradient)
self._selectors.add(self._rgbGradient)
# Add the RGB sliders
sliders = VerticalLayout()
sliders.setStyleName('rgb-sliders')
self._redSlider = Slider('Red', 0, 255)
try:
self._redSlider.setValue(color.getRed())
except ValueOutOfBoundsException:
pass
self._redSlider.setImmediate(True)
self._redSlider.setWidth('220px')
self._redSlider.setStyleName('rgb-slider')
self._redSlider.addStyleName('red')
self._redSlider.addListener(RedValueChangeListener(self),
IValueChangeListener)
sliders.addComponent(self._redSlider)
self._greenSlider = Slider('Green', 0, 255)
try:
self._greenSlider.setValue(color.getGreen())
except ValueOutOfBoundsException:
pass
self._greenSlider.setStyleName('rgb-slider')
self._greenSlider.addStyleName('green')
self._greenSlider.setWidth('220px')
self._greenSlider.setImmediate(True)
self._greenSlider.addListener(GreenValueChangeListener(self),
IValueChangeListener)
sliders.addComponent(self._greenSlider)
self._blueSlider = Slider('Blue', 0, 255)
try:
self._blueSlider.setValue(color.getBlue())
except ValueOutOfBoundsException:
pass
self._blueSlider.setStyleName('rgb-slider')
        self._blueSlider.addStyleName('blue')
self._blueSlider.setImmediate(True)
self._blueSlider.setWidth('220px')
self._blueSlider.addListener(BlueValueChangeListener(self),
IValueChangeListener)
sliders.addComponent(self._blueSlider)
rgbLayout.addComponent(sliders)
return rgbLayout
def createHSVTab(self, color):
"""Creates the hsv tab.
@return: the component
"""
hsvLayout = VerticalLayout()
hsvLayout.setMargin(False, False, True, False)
hsvLayout.addComponent(self._hsvPreview)
# Add the hsv gradient
self._hsvGradient = ColorPickerGradient('hsv-gradient',
HSVConverter(self))
self._hsvGradient.setColor(color)
self._hsvGradient.addListener(self, IColorChangeListener)
hsvLayout.addComponent(self._hsvGradient)
self._selectors.add(self._hsvGradient)
# Add the hsv sliders
hsv = color.getHSV()
sliders = VerticalLayout()
sliders.setStyleName('hsv-sliders')
self._hueSlider = Slider('Hue', 0, 360)
try:
            self._hueSlider.setValue(hsv[0] * 360.0)
except ValueOutOfBoundsException:
pass
self._hueSlider.setStyleName('hsv-slider')
self._hueSlider.addStyleName('hue-slider')
self._hueSlider.setWidth('220px')
self._hueSlider.setImmediate(True)
        self._hueSlider.addListener(HueValueChangeListener(self),
                                    IValueChangeListener)
sliders.addComponent(self._hueSlider)
self._saturationSlider = Slider('Saturation', 0, 100)
try:
            self._saturationSlider.setValue(hsv[1] * 100.0)
except ValueOutOfBoundsException:
pass
self._saturationSlider.setStyleName('hsv-slider')
self._saturationSlider.setWidth('220px')
self._saturationSlider.setImmediate(True)
        self._saturationSlider.addListener(SaturationValueChangeListener(self),
                                           IValueChangeListener)
sliders.addComponent(self._saturationSlider)
self._valueSlider = Slider('Value', 0, 100)
try:
            self._valueSlider.setValue(hsv[2] * 100.0)
except ValueOutOfBoundsException:
pass
self._valueSlider.setStyleName('hsv-slider')
self._valueSlider.setWidth('220px')
self._valueSlider.setImmediate(True)
        self._valueSlider.addListener(BrightnessValueChangeListener(self),
                                      IValueChangeListener)
sliders.addComponent(self._valueSlider)
hsvLayout.addComponent(sliders)
return hsvLayout
def createSelectTab(self):
"""Creates the select tab.
@return: the component
"""
selLayout = VerticalLayout()
selLayout.setMargin(False, False, True, False)
selLayout.addComponent(self._selPreview)
self._colorSelect = ColorPickerSelect()
self._colorSelect.addListener(self, IColorChangeListener)
selLayout.addComponent(self._colorSelect)
return selLayout
def buttonClick(self, event):
# History resize was clicked
if event.getButton() == self._resize:
state = self._resize.getData()
# minimize
if state:
self._historyContainer.setHeight('27px')
self._history.setHeight('27px')
# maximize
else:
self._historyContainer.setHeight('90px')
self._history.setHeight('80px')
self.setHeight(self.calculateHeight())
self._resize.setData(bool(not state))
# Ok button was clicked
elif event.getButton() == self._ok:
self._history.setColor(self.getColor())
self.fireColorChanged()
self.close()
# Cancel button was clicked
elif event.getButton() == self._cancel:
self.close()
def fireColorChanged(self):
"""Notifies the listeners that the color changed"""
self.fireEvent(ColorChangeEvent(self, self.getColor()))
def getHistory(self):
"""Gets the history.
@return: the history
"""
return self._history
def setColor(self, color):
if color is None:
return
self._selectedColor = color
self._hsvGradient.setColor(self._selectedColor)
self._hsvPreview.setColor(self._selectedColor)
self._rgbGradient.setColor(self._selectedColor)
self._rgbPreview.setColor(self._selectedColor)
self._selPreview.setColor(self._selectedColor)
def getColor(self):
return self._selectedColor
def getColorHistory(self):
"""Gets the color history.
@return: the color history
"""
return list(self._history.getHistory())
def colorChanged(self, event):
self._selectedColor = event.getColor()
try:
self._redSlider.setValue(self._selectedColor.getRed())
self._blueSlider.setValue(self._selectedColor.getBlue())
self._greenSlider.setValue(self._selectedColor.getGreen())
hsv = self._selectedColor.getHSV()
self._hueSlider.setValue(hsv[0] * 360.0)
self._saturationSlider.setValue(hsv[1] * 100.0)
self._valueSlider.setValue(hsv[2] * 100.0)
except ValueOutOfBoundsException:
traceback.print_exc(file=sys.stdout)
for s in self._selectors:
if (event.getSource() != s and s is not self
and s.getColor() != self._selectedColor):
s.setColor(self._selectedColor)
def addListener(self, listener, iface=None):
"""Adds a color change listener
@param listener:
The color change listener
"""
if (isinstance(listener, IColorChangeListener) and
(iface is None or issubclass(iface, IColorChangeListener))):
self.registerListener(ColorChangeEvent, listener,
_COLOR_CHANGE_METHOD)
super(ColorPickerPopup, self).addListener(listener, iface)
def addCallback(self, callback, eventType=None, *args):
if eventType is None:
eventType = callback._eventType # set by decorator
if issubclass(eventType, ColorChangeEvent):
self.registerCallback(ColorChangeEvent, callback, None, *args)
else:
super(ColorPickerPopup, self).addCallback(callback, eventType,
*args)
def removeListener(self, listener, iface=None):
"""Removes a color change listener
@param listener:
The listener
"""
if (isinstance(listener, IColorChangeListener) and
(iface is None or issubclass(iface, IColorChangeListener))):
self.withdrawListener(ColorChangeEvent, listener)
super(ColorPickerPopup, self).removeListener(listener, iface)
def removeCallback(self, callback, eventType=None):
if eventType is None:
eventType = callback._eventType
if issubclass(eventType, ColorChangeEvent):
self.withdrawCallback(ColorChangeEvent, callback)
else:
super(ColorPickerPopup, self).removeCallback(callback, eventType)
def tabIsVisible(self, tab):
"""Is the tab visible
@param tab:
The tab to check
"""
tabIterator = self._tabs.getComponentIterator()
for t in tabIterator:
if t == tab:
return True
return False
def tabsNumVisible(self):
"""How many tabs are visible
@return: The number of tabs visible
"""
tabIterator = self._tabs.getComponentIterator()
tabCounter = 0
for _ in tabIterator:
tabCounter += 1
return tabCounter
def checkIfTabsNeeded(self):
"""Checks if tabs are needed and hides them if not"""
if self.tabsNumVisible() == 1:
self._tabs.hideTabs(True)
self.setHeight(self.calculateHeight())
else:
self._tabs.hideTabs(False)
self.setHeight(self.calculateHeight())
def setRGBTabVisible(self, visible):
"""Set RGB tab visibility
@param visible:
The visibility of the RGB tab
"""
if visible and not self.tabIsVisible(self._rgbTab):
self._tabs.addTab(self._rgbTab, 'RGB', None)
self.checkIfTabsNeeded()
elif not visible and self.tabIsVisible(self._rgbTab):
self._tabs.removeComponent(self._rgbTab)
self.checkIfTabsNeeded()
def setHSVTabVisible(self, visible):
"""Set HSV tab visibility
@param visible:
The visibility of the HSV tab
"""
if visible and not self.tabIsVisible(self._hsvTab):
self._tabs.addTab(self._hsvTab, 'HSV', None)
self.checkIfTabsNeeded()
elif not visible and self.tabIsVisible(self._hsvTab):
self._tabs.removeComponent(self._hsvTab)
self.checkIfTabsNeeded()
def setSwatchesTabVisible(self, visible):
"""Set Swatches tab visibility
@param visible:
The visibility of the Swatches tab
"""
if visible and not self.tabIsVisible(self._swatchesTab):
self._tabs.addTab(self._swatchesTab, 'Swatches', None)
self.checkIfTabsNeeded()
elif not visible and self.tabIsVisible(self._swatchesTab):
self._tabs.removeComponent(self._swatchesTab)
self.checkIfTabsNeeded()
def setHistoryVisible(self, visible):
"""Set the History visibility
"""
self._historyContainer.setVisible(visible)
self._resize.setVisible(visible)
self.setHeight(self.calculateHeight())
def setPreviewVisible(self, visible):
"""Set the preview visibility
"""
self._hsvPreview.setVisible(visible)
self._rgbPreview.setVisible(visible)
self._selPreview.setVisible(visible)
self.setHeight(self.calculateHeight())
def attach(self):
self.setHeight(self.calculateHeight())
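# Illustrative usage sketch (not part of the original module): the popup is
# presumably opened by the add-on's ColorPicker component, but it can also be
# configured directly, e.g.
#
#   popup = ColorPickerPopup(Color.WHITE)
#   popup.setSwatchesTabVisible(False)
#   popup.setHistoryVisible(True)
#
# The visibility setters recompute the popup height via calculateHeight().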
# Implement the RGB color converter
class RGBConverter(ICoordinates2Color):
def calculate(self, c_or_x, y=None):
if y is None:
c = c_or_x
hsv = c.getHSV()
x = round(hsv[0] * 220.0)
y = 0
# lower half
if hsv[1] == 1.0:
y = round(110.0 - ((hsv[1] + hsv[2]) * 110.0))
else:
y = round(hsv[1] * 110.0)
return [x, y]
else:
x = c_or_x
h = x / 220.0
s = 1.0
v = 1.0
if y < 110:
s = y / 110.0
elif y > 110:
v = 1.0 - ((y - 110.0) / 110.0)
return Color(*hsv_to_rgb(h, s, v))
# Implement the HSV color converter
class HSVConverter(ICoordinates2Color):
def __init__(self, cpp):
self._cpp = cpp
def calculate(self, c_or_x, y=None):
if y is None:
c = c_or_x
hsv = c.getHSV()
# Calculate coordinates
x = round(hsv[2] * 220.0)
y = round(220 - (hsv[1] * 220.0))
# Create background color of clean color
bgColor = Color(*hsv_to_rgb(hsv[0], 1.0, 1.0))
self._cpp._hsvGradient.setBackgroundColor(bgColor)
return [x, y]
else:
x = c_or_x
saturation = 1.0 - (y / 220.0)
value = x / 220.0
hue = float(str(self._cpp._hueSlider.getValue())) / 360.0
color = Color(*hsv_to_rgb(hue, saturation, value))
return color
class _ColorValueChangeListener(IValueChangeListener):
def __init__(self, cpp):
self._cpp = cpp
class RedValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
red = event.getProperty().getValue()
newColor = Color(int(red), self._cpp._selectedColor.getGreen(),
self._cpp._selectedColor.getBlue())
self._cpp.setColor(newColor)
class GreenValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
green = event.getProperty().getValue()
newColor = Color(self._cpp._selectedColor.getRed(), int(green),
self._cpp._selectedColor.getBlue())
self._cpp.setColor(newColor)
class BlueValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
blue = event.getProperty().getValue()
newColor = Color(self._cpp._selectedColor.getRed(),
self._cpp._selectedColor.getGreen(), int(blue))
self._cpp.setColor(newColor)
class HueValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
hue = float(str(event.getProperty().getValue())) / 360.0
saturation = float(str(self._cpp._saturationSlider.getValue())) / 100.0
value = float(str(self._cpp._valueSlider.getValue())) / 100.0
# Set the color
color = Color(*hsv_to_rgb(hue, saturation, value))
self._cpp.setColor(color)
# Set the background color of the hue gradient. This has to be
# done here since in the conversion the base color information
# is lost when color is black/white
bgColor = Color(*hsv_to_rgb(hue, 1.0, 1.0))
self._cpp._hsvGradient.setBackgroundColor(bgColor)
class SaturationValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
hue = float(str(self._cpp._hueSlider.getValue())) / 360.0
saturation = float(str(event.getProperty().getValue())) / 100.0
value = float(str(self._cpp._valueSlider.getValue())) / 100.0
self._cpp.setColor( Color(*hsv_to_rgb(hue, saturation, value)) )
class BrightnessValueChangeListener(_ColorValueChangeListener):
def valueChange(self, event):
hue = float(str(self._cpp._hueSlider.getValue())) / 360.0
saturation = float(str(self._cpp._saturationSlider.getValue())) / 100.0
value = float(str(event.getProperty().getValue())) / 100.0
self._cpp.setColor( Color(*hsv_to_rgb(hue, saturation, value)) ) | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ats/model/issue.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.ats.model.issue_status_enum import IssueStatusEnum
globals()['IssueStatusEnum'] = IssueStatusEnum
class Issue(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'error_description': (str,), # noqa: E501
'id': (str, none_type,), # noqa: E501
'status': (IssueStatusEnum, none_type,), # noqa: E501
'end_user': ({str: (bool, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501
'first_incident_time': (datetime, none_type, none_type,), # noqa: E501
'last_incident_time': (datetime, none_type, none_type,), # noqa: E501
'is_muted': (bool, none_type,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'error_description': 'error_description', # noqa: E501
'id': 'id', # noqa: E501
'status': 'status', # noqa: E501
'end_user': 'end_user', # noqa: E501
'first_incident_time': 'first_incident_time', # noqa: E501
'last_incident_time': 'last_incident_time', # noqa: E501
'is_muted': 'is_muted', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'end_user', # noqa: E501
'is_muted', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, error_description, *args, **kwargs): # noqa: E501
"""Issue - a model defined in OpenAPI
Args:
error_description (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
status (IssueStatusEnum): [optional] # noqa: E501
end_user ({str: (bool, dict, float, int, list, str, none_type)}): [optional] # noqa: E501
first_incident_time (datetime, none_type): [optional] # noqa: E501
last_incident_time (datetime, none_type): [optional] # noqa: E501
is_muted (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.error_description = error_description
self.status = kwargs.get("status", None)
self.first_incident_time = kwargs.get("first_incident_time", None)
self.last_incident_time = kwargs.get("last_incident_time", None)
# Read only properties
self._id = kwargs.get("id", str())
self._end_user = kwargs.get("end_user", dict())
self._is_muted = kwargs.get("is_muted", bool())
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, error_description, *args, **kwargs): # noqa: E501
"""Issue - a model defined in OpenAPI
Args:
error_description (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
status (IssueStatusEnum): [optional] # noqa: E501
end_user ({str: (bool, dict, float, int, list, str, none_type)}): [optional] # noqa: E501
first_incident_time (datetime, none_type): [optional] # noqa: E501
last_incident_time (datetime, none_type): [optional] # noqa: E501
is_muted (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.error_description: Union[str] = error_description
self.status: Union["IssueStatusEnum"] = kwargs.get("status", None)
self.first_incident_time: Union[datetime, none_type] = kwargs.get("first_incident_time", None)
self.last_incident_time: Union[datetime, none_type] = kwargs.get("last_incident_time", None)
# Read only properties
self._id: Union[str] = kwargs.get("id", str())
self._end_user: Union[Dict[str, bool, dict, float, int, list, str, none_type]] = kwargs.get("end_user", dict())
self._is_muted: Union[bool] = kwargs.get("is_muted", bool())
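    # Minimal construction sketch (the description below is illustrative):
    #
    #   issue = Issue(error_description="Missing permissions")
    #
    # Optional fields (status, first_incident_time, ...) may be passed as
    # keyword arguments; read-only fields such as id, end_user and is_muted
    # are normally populated via _from_openapi_data when deserializing API
    # responses.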
# Read only property getters
@property
def id(self):
return self._id
@property
def end_user(self):
return self._end_user
@property
def is_muted(self):
return self._is_muted | PypiClean |
/NotebookProv-0.3.tar.gz/NotebookProv-0.3/notebookprov/NotebookProvenance.py | import sys
import inspect
import parser
import logging
import ast
import copy
import csv
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
class NotebookRecorder():
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
if NotebookRecorder.__instance == None:
NotebookRecorder()
return NotebookRecorder.__instance
def __init__(self):
        """ Virtually private constructor. """
        self.recorder = NotebookProvenance()
        if NotebookRecorder.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            NotebookRecorder.__instance = self
class NotebookProvenance():
def __init__(self,log_file="default.log",rewrite=False,record_in_memory=False):
self.source = {}
self.vars = {}
self.var_id = set()
self.dependency = []
self.trace = []
self.previous_frame = {}
self.temp_pon = []
self.data_graph = []
#self.log_file = log_file
self.set_log_file(log_file,rewrite)
self.counter = 0
self.code_pos = {}
self.line_stack = {}
self.record_in_memory = record_in_memory
def set_log_file(self,log_file,rewrite):
self.log_file = log_file
self.rewrite = rewrite
self.prepare_file()
def prepare_file(self):
if self.rewrite:
self.file = open(self.log_file,"w")
else:
self.file = open(self.log_file,"a")
self.csv_writer = csv.writer(self.file)
def parse_vars(self,source):
result = {"t":[],"v":[]}
root = ast.parse(source.strip())
#is_body = False
for node in ast.walk(root):
if isinstance(node, ast.Name):
if isinstance(node.ctx, ast.Store):
result["t"].append(node.id)
else:
result["v"].append(node.id)
elif isinstance(node, ast.Attribute):
#yield node.attr
result["v"].append(node.attr)
elif isinstance(node, ast.FunctionDef):
#yield node.name
result["v"].append(node.name)
return result
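    # Example (illustrative): for the source "y = x + 1" this returns
    # {"t": ["y"], "v": ["x"]} -- "t" collects names that are assigned
    # (stored), while "v" collects names that are read, attribute names
    # and function definitions.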
def get_annotation_var(self,filename):
try:
self.vars[filename]
except:
self.vars[filename] = {}
for i,x in enumerate(self.source[filename][0]):
#print(x)
if x.strip().startswith("#@begin "):
#print(x)
yy = x.strip().split(" ")
try:
self.vars[filename][yy[1]]
except:
self.vars[filename][yy[1]] = {}
self.vars[filename][yy[1]]["val"] = []
self.vars[filename][yy[1]]["start"] = None
self.vars[filename][yy[1]]["end"] = None
self.vars[filename][yy[1]]["val"] = yy[2:]
self.vars[filename][yy[1]]["start"] = i
if x.strip().startswith("#@end "):
yy = x.strip().split(" ")
try:
self.vars[filename][yy[1]]["end"] = i
except:
pass
def record_vars(self,frame):
var_name = frame.f_locals.keys()
for x in var_name:
self.var_id.add((id(frame.f_locals[x]),x))
def add_source(self,filename,frame):
try:
self.source[filename]
except:
self.source[filename] = inspect.getsourcelines(frame)
#self.trace[filename] = []
self.get_annotation_var(filename)
def trace_vars(self,frame):
if self.previous_frame!=None:
previous_frame = self.previous_frame
local_var = previous_frame.f_locals
previous_line_no = previous_frame.f_lineno
co = previous_frame.f_code
func_name = co.co_name
previous_filename = co.co_filename
#varname = self.vars[previous_filename]
local_var = frame.f_locals
line_no = frame.f_lineno
co = frame.f_code
func_name = co.co_name
filename = co.co_filename
varname = self.vars[filename]
for y in varname:
if (previous_line_no-1)>=self.vars[filename][y]["start"] and (previous_line_no-1)<=self.vars[filename][y]["end"]:
for yy in varname[y]["val"]:
if self.source[previous_filename][0][previous_line_no-1].find(yy) >= 0:
try:
print(y,yy)
#print(inspect.stack()[:4])
floc = local_var[yy].copy()
#print(floc)
#self.trace[filename].append((filename,func_name,line_no,self.source[filename][0][line_no-1],yy,floc))
#self.trace.append((filename,func_name,line_no,self.source[filename][0][line_no-1],yy,floc))
self.trace.append((filename,y,func_name,line_no,self.source[previous_filename][0][previous_line_no-1],yy,id(yy),floc))
except BaseException as ex:
#raise ex
pass
self.previous_frame = frame
def trace_calls(self,frame, event, arg):
co = frame.f_code
func_name = co.co_names
if func_name == 'write':
# Ignore write() calls from print statements
return
if event == 'return':
#print("return",arg)
pass
if func_name == "CaptureProvenance":
return
line_no = frame.f_lineno
filename = co.co_filename
logging.debug(("call:",event,func_name,arg,line_no,filename))
        # only pay attention to functions / operations that are called from the python notebook
if filename.startswith("<ipython-input"):
return self.trace_lines
return
def trace_lines(self,frame,event,arg):
#co = frame.f_code
#func_name = co.co_names
#if (event != 'line') or ("DataFrame" not in func_name):
# return
#print("line",co,co.co_name,func_name,event,arg)
co = frame.f_code
func_name = co.co_name
line_no = frame.f_lineno
filename = co.co_filename
if filename.startswith("<ipython-input"):
#logging.debug(("line:",event,func_name,arg,line_no,frame.f_locals.keys()))
#print(filename,frame.f_locals.keys(),func_name,event,arg,line_no)
defined_names = set(frame.f_locals.keys())
self.add_source(filename,frame)
if event == "line" and func_name!="<module>":
try:
line_code = self.source[frame.f_code.co_filename][0][frame.f_lineno-1]
except:
return
#line_code = " ".join(self.source[frame.f_code.co_filename][0][frame.f_code.co_firstlineno-1:frame.f_lineno])
parsable = False
trace_line = frame.f_lineno
try:
self.code_pos[func_name]
except:
self.code_pos[func_name] = (0,1)
if trace_line >= self.code_pos[func_name][0] and trace_line <= self.code_pos[func_name][1]:
return
#if trace_line<self.temp_trace_line:
# return
pvars = {"t":[],"v":[]}
while not parsable and trace_line<len(self.source[frame.f_code.co_filename][0]):
try:
pvars = self.parse_vars(line_code.strip())
parsable = True
except:
trace_line+=1
line_code = " ".join(self.source[frame.f_code.co_filename][0][frame.f_lineno-1:trace_line])
#self.temp_continue = True
self.code_pos[func_name] = (frame.f_lineno-1,trace_line)
self.temp_trace_line = trace_line
logging.debug(("pvars_line:",pvars,line_code,frame.f_locals))
try:
lstack = copy.deepcopy(self.line_stack[func_name])
self.line_stack[func_name] = [pvars,None]
except:
lstack = None
self.line_stack[func_name] = [pvars,None]
new_identifier_val = []
if lstack!=None:
found = []
not_found = []
#for x in st_com.co_names:
for x in lstack[0]["t"]:
#for x in pvars["t"]:
try:
found.append((id(frame.f_locals[x]),x))
except:
not_found.append(x)
for x in found:
try:
new_identifier_val.append((x,copy.deepcopy(frame.f_locals[x[1]])))
except:
continue
logging.debug(("new_identifier:",new_identifier_val))
"""
try:
frame_back = self.previous_frame[func_name]
except:
frame_back = frame.f_locals.copy()
"""
frame_back = frame.f_locals.copy()
"""
if self.previous_frame != None:
frame_back = self.previous_frame
else:
frame_back = frame.f_locals.copy()
"""
#logging.debug((frame_back,frame))
found = []
for x in pvars["v"]:
try:
found.append(((id(frame_back[x]),x),copy.deepcopy(frame_back[x])))
except:
not_found.append(x)
self.line_stack[func_name][1] = (found,line_code,filename,line_no,func_name)
#print("new_identifier:",set.difference(set(found),self.var_id))
#print("used_identifier:",set.intersection(set(found),self.var_id))
#self.data_graph.append((new_identifier_val,[(x,frame.f_locals[x]) for x in used_identifier],line_code,filename,line_no))
if len(new_identifier_val)==0:
new_identifier_val = found
if lstack!=None:
ffound = lstack[1][0]
#ffound = found
#self.data_graph.append((new_identifier_val,ffound.copy(),line_code,filename,line_no,func_name))
if self.record_in_memory:
self.data_graph.append((new_identifier_val,ffound.copy(),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]))
#logging.debug((new_identifier_val,ffound.copy(),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]))
for x in new_identifier_val:
for y in ffound:
temp_write = [x[0][0],x[0][1],str(x[1]),y[0][0],y[0][1],str(y[1]),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]]
#temp_write = [x[0][0],x[0][1],str(x[1]),y[0][0],y[0][1],str(y[1]),line_code,filename,line_no,func_name]
self.csv_writer.writerow(temp_write)
#self.data_graph.append((new_identifier_val,self.temp_pon.copy(),line_code,filename,line_no))
"""
#if lstack!=None:
#ffound = lstack[1][0]
ffound = found
self.data_graph.append((new_identifier_val,ffound.copy(),line_code,filename,line_no,func_name))
#self.data_graph.append((new_identifier_val,ffound.copy(),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]))
#logging.debug((new_identifier_val,ffound.copy(),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]))
for x in new_identifier_val:
for y in ffound:
#temp_write = [x[0][0],x[0][1],str(x[1]),y[0][0],y[0][1],str(y[1]),lstack[1][1],lstack[1][2],lstack[1][3],lstack[1][4]]
temp_write = [x[0][0],x[0][1],str(x[1]),y[0][0],y[0][1],str(y[1]),line_code,filename,line_no,func_name]
self.csv_writer.writerow(temp_write)
#self.data_graph.append((new_identifier_val,self.temp_pon.copy(),line_code,filename,line_no))
"""
#print(self.temp_pon)
#print("new_var:",set.intersection(set(self.temp_pon),defined_names))
#self.temp_pon.clear()
#self.previous_frame = frame.f_locals.copy()
self.previous_frame[func_name] = frame.f_locals.copy()
elif event == "return" and func_name=="<module>":
try:
line_code = " ".join(self.source[frame.f_code.co_filename][0][frame.f_code.co_firstlineno-1:frame.f_lineno])
except:
return
try:
pvars = self.parse_vars(line_code.strip())
except:
pvars = {"t":[],"v":[]}
found = []
not_found = []
#for x in st_com.co_names:
for x in pvars["t"]:
try:
found.append((id(frame.f_locals[x]),x))
except:
not_found.append(x)
new_identifier = set.difference(set(found),self.var_id)
new_identifier_val = []
for x in new_identifier:
try:
new_identifier_val.append((x,copy.deepcopy(frame.f_locals[x[1]])))
except:
continue
"""
if self.previous_frame != None:
frame_back = self.previous_frame
else:
frame_back = frame.f_locals.copy()
"""
try:
frame_back = self.previous_frame[func_name]
except:
frame_back = frame.f_locals.copy()
#logging.debug((frame_back,frame))
found = []
for x in pvars["v"]:
try:
found.append(((id(frame_back[x]),x),copy.deepcopy(frame_back[x])))
except:
not_found.append(x)
if len(new_identifier_val)==0:
new_identifier_val = found
if self.record_in_memory:
self.data_graph.append((new_identifier_val,found.copy(),line_code,filename,line_no,func_name))
for x in new_identifier_val:
for y in found:
temp_write = [x[0][0],x[0][1],str(x[1]),y[0][0],y[0][1],str(y[1]),line_code,filename,line_no,func_name]
self.csv_writer.writerow(temp_write)
#self.temp_pon.clear()
self.previous_frame[func_name] = frame.f_locals.copy()
self.record_vars(frame)
def start_trace(self):
sys.settrace(self.trace_calls)
def stop_trace(self):
sys.settrace(None)
self.file.close()
from IPython.core.magic import (register_line_magic, register_cell_magic,
register_line_cell_magic)
@register_line_magic
def capture_provenance_start(line):
args = line.split(" ")
NotebookRecorder.getInstance().recorder.set_log_file(args[0],eval(args[1]))
NotebookRecorder.getInstance().recorder.start_trace()
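# Illustrative notebook usage (the log file name is an example): the first
# argument is the CSV log file and the second is evaluated as the `rewrite`
# flag, e.g.
#
#   %capture_provenance_start provenance.csv True
#   ... run the cells to be traced ...
#   %capture_provenance_stop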
@register_line_magic
def capture_provenance_stop(line):
NotebookRecorder.getInstance().recorder.stop_trace() | PypiClean |
/FireWorks-2.0.3.tar.gz/FireWorks-2.0.3/fireworks/core/launchpad.py | import datetime
import json
import os
import random
import shutil
import time
import traceback
import warnings
from collections import defaultdict
from itertools import chain
import gridfs
from bson import ObjectId
from monty.os.path import zpath
from monty.serialization import loadfn
from pymongo import ASCENDING, DESCENDING, MongoClient
from pymongo.errors import DocumentTooLarge
from tqdm import tqdm
from fireworks.core.firework import Firework, FWAction, Launch, Tracker, Workflow
from fireworks.fw_config import (
GRIDFS_FALLBACK_COLLECTION,
LAUNCHPAD_LOC,
MAINTAIN_INTERVAL,
MONGO_SOCKET_TIMEOUT_MS,
RESERVATION_EXPIRATION_SECS,
RUN_EXPIRATION_SECS,
SORT_FWS,
WFLOCK_EXPIRATION_KILL,
WFLOCK_EXPIRATION_SECS,
)
from fireworks.utilities.fw_serializers import (
FWSerializable,
reconstitute_dates,
recursive_dict,
)
from fireworks.utilities.fw_utilities import get_fw_logger
__author__ = "Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__maintainer__ = "Anubhav Jain"
__email__ = "[email protected]"
__date__ = "Jan 30, 2013"
# TODO: lots of duplication reduction and cleanup possible
def sort_aggregation(sort):
"""Build sorting aggregation pipeline.
Args:
sort [(str,int)]: sorting keys and directions as a list of
(str, int) tuples, i.e. [('updated_on', 1)]
"""
# Fix for sorting by dates which are actually stored as strings:
# Not sure about the underlying issue's source, but apparently some
# dates are stored as strings and others as date objects.
# Following pipeline makes sure all stored dates are actually date
# objects for proper comparison when sorting.
# Assumption below is that dates are either strings or date objects,
# nothing else.
aggregation = []
for k, _ in sort:
if k in {"updated_on", "created_on"}:
aggregation.append(
{
"$set": {
k: {
"$dateFromString": {
"dateString": "$" + k,
"onError": "$" + k, # if conversion fails, just return original object
}
}
}
}
)
aggregation.append({"$sort": {k: v for k, v in sort}})
return aggregation
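# Editor's sketch of the pipeline this helper would emit for sort=[("updated_on", -1)],
# given the string-vs-date storage quirk described above:
#
#     [
#         {"$set": {"updated_on": {"$dateFromString": {"dateString": "$updated_on",
#                                                      "onError": "$updated_on"}}}},
#         {"$sort": {"updated_on": -1}},
#     ]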
class LockedWorkflowError(ValueError):
"""
Error raised if the context manager WFLock can't acquire the lock on the WF within the selected
time interval (WFLOCK_EXPIRATION_SECS), if the killing of the lock is disabled (WFLOCK_EXPIRATION_KILL)
"""
class WFLock:
"""
Lock a Workflow, i.e. for performing update operations
Raises a LockedWorkflowError if the lock couldn't be acquired within expire_secs and kill==False.
Calling functions are responsible for handling the error in order to avoid database inconsistencies.
"""
def __init__(self, lp, fw_id, expire_secs=WFLOCK_EXPIRATION_SECS, kill=WFLOCK_EXPIRATION_KILL):
"""
Args:
lp (LaunchPad)
fw_id (int): Firework id
expire_secs (int): max waiting time in seconds.
kill (bool): force lock acquisition or not
"""
self.lp = lp
self.fw_id = fw_id
self.expire_secs = expire_secs
self.kill = kill
def __enter__(self):
ctr = 0
waiting_time = 0
# acquire lock
links_dict = self.lp.workflows.find_one_and_update(
{"nodes": self.fw_id, "locked": {"$exists": False}}, {"$set": {"locked": True}}
)
# could not acquire lock b/c WF is already locked for writing
while not links_dict:
ctr += 1
time_incr = ctr / 10.0 + random.random() / 100.0
time.sleep(time_incr) # wait a bit for lock to free up
waiting_time += time_incr
if waiting_time > self.expire_secs: # too much time waiting, expire lock
wf = self.lp.workflows.find_one({"nodes": self.fw_id})
if not wf:
raise ValueError(f"Could not find workflow in database: {self.fw_id}")
if self.kill: # force lock acquisition
self.lp.m_logger.warning(f"FORCIBLY ACQUIRING LOCK, WF: {self.fw_id}")
links_dict = self.lp.workflows.find_one_and_update(
{"nodes": self.fw_id}, {"$set": {"locked": True}}
)
else: # throw error if we don't want to force lock acquisition
raise LockedWorkflowError(f"Could not get workflow - LOCKED: {self.fw_id}")
else:
# retry lock
links_dict = self.lp.workflows.find_one_and_update(
{"nodes": self.fw_id, "locked": {"$exists": False}}, {"$set": {"locked": True}}
)
def __exit__(self, exc_type, exc_val, exc_tb):
self.lp.workflows.find_one_and_update({"nodes": self.fw_id}, {"$unset": {"locked": True}})
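# Illustrative use of WFLock as a context manager (editor's sketch; `lp` is assumed to be
# an existing LaunchPad instance and 123 a fw_id belonging to the workflow to lock):
#
#     try:
#         with WFLock(lp, 123):
#             wf = lp.get_wf_by_fw_id_lzyfw(123)
#             ...mutate and persist the workflow while it is locked...
#     except LockedWorkflowError:
#         ...another process holds the lock and kill=False; retry or give up...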
class LaunchPad(FWSerializable):
"""
The LaunchPad manages the FireWorks database.
"""
def __init__(
self,
host=None,
port=None,
name=None,
username=None,
password=None,
logdir=None,
strm_lvl=None,
user_indices=None,
wf_user_indices=None,
authsource=None,
uri_mode=False,
mongoclient_kwargs=None,
):
"""
Args:
host (str): hostname. If uri_mode is True, a MongoDB connection string URI
(https://docs.mongodb.com/manual/reference/connection-string/) can be used instead of the remaining
options below.
port (int): port number
name (str): database name
username (str)
password (str)
logdir (str): path to the log directory
strm_lvl (str): the logger stream level
user_indices (list): list of 'fireworks' collection indexes to be built
wf_user_indices (list): list of 'workflows' collection indexes to be built
authsource (str): authSource parameter for MongoDB authentication; defaults to "name" (i.e., db name) if
not set
uri_mode (bool): if set True, all Mongo connection parameters occur through a MongoDB URI string (set as
the host).
mongoclient_kwargs (dict): A list of any other custom keyword arguments to be
passed into the MongoClient connection. Use these kwargs to specify SSL/TLS or serverSelectionTimeoutMS
arguments. Note these arguments are different depending on the major pymongo version used; see
pymongo documentation for more details.
"""
self.host = host if (host or uri_mode) else "localhost"
self.port = port if (port or uri_mode) else 27017
self.name = name if (name or uri_mode) else "fireworks"
self.username = username
self.password = password
self.authsource = authsource or self.name
self.mongoclient_kwargs = mongoclient_kwargs or {}
self.uri_mode = uri_mode
# set up logger
self.logdir = logdir
self.strm_lvl = strm_lvl if strm_lvl else "INFO"
self.m_logger = get_fw_logger("launchpad", l_dir=self.logdir, stream_level=self.strm_lvl)
self.user_indices = user_indices if user_indices else []
self.wf_user_indices = wf_user_indices if wf_user_indices else []
# get connection
if uri_mode:
self.connection = MongoClient(host, **self.mongoclient_kwargs)
dbname = host.split("/")[-1].split("?")[0] # parse URI to extract dbname
self.db = self.connection[dbname]
else:
self.connection = MongoClient(
self.host,
self.port,
socketTimeoutMS=MONGO_SOCKET_TIMEOUT_MS,
username=self.username,
password=self.password,
authSource=self.authsource,
**self.mongoclient_kwargs,
)
self.db = self.connection[self.name]
self.fireworks = self.db.fireworks
self.launches = self.db.launches
self.offline_runs = self.db.offline_runs
self.fw_id_assigner = self.db.fw_id_assigner
self.workflows = self.db.workflows
if GRIDFS_FALLBACK_COLLECTION:
self.gridfs_fallback = gridfs.GridFS(self.db, GRIDFS_FALLBACK_COLLECTION)
else:
self.gridfs_fallback = None
self.backup_launch_data = {}
self.backup_fw_data = {}
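    # Illustrative construction (editor's sketch, not part of the original source):
    # classic host/port form and URI form, with hypothetical credentials.
    #
    #     lp = LaunchPad(host="localhost", port=27017, name="fireworks",
    #                    username="fw_user", password="secret")
    #     lp_uri = LaunchPad(host="mongodb://fw_user:secret@db.example.com:27017/fireworks",
    #                        uri_mode=True)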
def to_dict(self):
"""
Note: usernames/passwords are exported as unencrypted Strings!
"""
return {
"host": self.host,
"port": self.port,
"name": self.name,
"username": self.username,
"password": self.password,
"logdir": self.logdir,
"strm_lvl": self.strm_lvl,
"user_indices": self.user_indices,
"wf_user_indices": self.wf_user_indices,
"authsource": self.authsource,
"uri_mode": self.uri_mode,
"mongoclient_kwargs": self.mongoclient_kwargs,
}
def update_spec(self, fw_ids, spec_document, mongo=False):
"""
Update fireworks with a spec. Sometimes you need to modify a firework in progress.
Args:
fw_ids [int]: All fw_ids to modify.
spec_document (dict): The spec document. Note that only modifications to
the spec key are allowed. So if you supply {"_tasks.1.parameter": "hello"},
you are effectively modifying spec._tasks.1.parameter in the actual fireworks
collection.
mongo (bool): spec_document uses mongo syntax to directly update the spec
"""
if mongo:
mod_spec = spec_document
else:
mod_spec = {"$set": {("spec." + k): v for k, v in spec_document.items()}}
allowed_states = ["READY", "WAITING", "FIZZLED", "DEFUSED", "PAUSED"]
self.fireworks.update_many({"fw_id": {"$in": fw_ids}, "state": {"$in": allowed_states}}, mod_spec)
for fw in self.fireworks.find(
{"fw_id": {"$in": fw_ids}, "state": {"$nin": allowed_states}}, {"fw_id": 1, "state": 1}
):
self.m_logger.warning(
f"Cannot update spec of fw_id: {fw['fw_id']} with state: {fw['state']}. Try rerunning first."
)
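    # Editor's sketch of the two calling conventions described above
    # (fw_ids and keys are hypothetical):
    #
    #     lp.update_spec([1, 2], {"_tasks.1.parameter": "hello"})                      # dotted spec keys
    #     lp.update_spec([1, 2], {"$unset": {"spec._tasks.1.parameter": ""}}, mongo=True)  # raw Mongo update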
@classmethod
def from_dict(cls, d):
port = d.get("port", None)
name = d.get("name", None)
username = d.get("username", None)
password = d.get("password", None)
logdir = d.get("logdir", None)
strm_lvl = d.get("strm_lvl", None)
user_indices = d.get("user_indices", [])
wf_user_indices = d.get("wf_user_indices", [])
authsource = d.get("authsource", None)
uri_mode = d.get("uri_mode", False)
mongoclient_kwargs = d.get("mongoclient_kwargs", None)
return LaunchPad(
d["host"],
port,
name,
username,
password,
logdir,
strm_lvl,
user_indices,
wf_user_indices,
authsource,
uri_mode,
mongoclient_kwargs,
)
@classmethod
def auto_load(cls):
if LAUNCHPAD_LOC:
return LaunchPad.from_file(LAUNCHPAD_LOC)
return LaunchPad()
def reset(self, password, require_password=True, max_reset_wo_password=25):
"""
Create a new FireWorks database. This will overwrite the existing FireWorks database! To
safeguard against accidentally erasing an existing database, a password must be entered.
Args:
password (str): A String representing today's date, e.g. '2012-12-31'
require_password (bool): Whether a password is required to reset the DB. Setting to
false is dangerous because running code unintentionally could clear your DB - use
max_reset_wo_password to minimize risk.
max_reset_wo_password (int): A failsafe; when require_password is set to False,
FWS will not clear DBs that contain more workflows than this parameter
"""
m_password = datetime.datetime.now().strftime("%Y-%m-%d")
if password == m_password or (
not require_password and self.workflows.count_documents({}) <= max_reset_wo_password
):
self.fireworks.delete_many({})
self.launches.delete_many({})
self.workflows.delete_many({})
self.offline_runs.delete_many({})
self._restart_ids(1, 1)
if self.gridfs_fallback is not None:
self.db.drop_collection(f"{GRIDFS_FALLBACK_COLLECTION}.chunks")
self.db.drop_collection(f"{GRIDFS_FALLBACK_COLLECTION}.files")
self.tuneup()
self.m_logger.info("LaunchPad was RESET.")
elif not require_password:
raise ValueError(
f"Password check cannot be overridden since the size of DB ({self.fireworks.count_documents({})} "
f"workflows) is greater than the max_reset_wo_password parameter ({max_reset_wo_password})."
)
else:
raise ValueError(f"Invalid password! Password is today's date: {m_password}")
def maintain(self, infinite=True, maintain_interval=None):
"""
Perform launchpad maintenance: detect lost runs and unreserved RESERVE launches.
Args:
infinite (bool)
maintain_interval (seconds): sleep time
"""
maintain_interval = maintain_interval if maintain_interval else MAINTAIN_INTERVAL
while True:
self.m_logger.info("Performing maintenance on Launchpad...")
self.m_logger.debug("Tracking down FIZZLED jobs...")
fl, ff, inconsistent_fw_ids = self.detect_lostruns(fizzle=True)
if fl:
self.m_logger.info(f"Detected {len(fl)} FIZZLED launches: {fl}")
self.m_logger.info(f"Detected {len(ff)} FIZZLED FWs: {ff}")
if inconsistent_fw_ids:
self.m_logger.info(
f"Detected {len(inconsistent_fw_ids)} FIZZLED inconsistent fireworks: {inconsistent_fw_ids}"
)
self.m_logger.debug("Tracking down stuck RESERVED jobs...")
ur = self.detect_unreserved(rerun=True)
if ur:
self.m_logger.info(f"Unreserved {len(ur)} RESERVED launches: {ur}")
self.m_logger.info("LaunchPad was MAINTAINED.")
if not infinite:
break
self.m_logger.debug(f"Sleeping for {maintain_interval} secs...")
time.sleep(maintain_interval)
def add_wf(self, wf, reassign_all=True):
"""
        Add a workflow (or firework) to the launchpad. The firework ids will be reassigned.
Args:
wf (Workflow/Firework)
Returns:
dict: mapping between old and new Firework ids
"""
if isinstance(wf, Firework):
wf = Workflow.from_Firework(wf)
# sets the root FWs as READY
# prefer to wf.refresh() for speed reasons w/many root FWs
for fw_id in wf.root_fw_ids:
wf.id_fw[fw_id].state = "READY"
wf.fw_states[fw_id] = "READY"
# insert the FireWorks and get back mapping of old to new ids
old_new = self._upsert_fws(list(wf.id_fw.values()), reassign_all=reassign_all)
# update the Workflow with the new ids
wf._reassign_ids(old_new)
# insert the WFLinks
self.workflows.insert_one(wf.to_db_dict())
self.m_logger.info(f"Added a workflow. id_map: {old_new}")
return old_new
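    # Editor's sketch of adding work (names are hypothetical; a single Firework is
    # wrapped into a one-node Workflow automatically, as noted above):
    #
    #     fw = Firework([MyTask()], spec={"_priority": 2}, name="my_fw")
    #     id_map = lp.add_wf(fw)   # mapping of old (temporary) fw_ids to newly assigned ones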
def bulk_add_wfs(self, wfs):
"""
        Adds a list of workflows to the fireworks database
        using insert_many for both the fws and wfs, which is
        more efficient than adding them one at a time.
Args:
wfs ([Workflow]): list of workflows or fireworks
Returns:
None
"""
# Make all fireworks workflows
wfs = [Workflow.from_firework(wf) if isinstance(wf, Firework) else wf for wf in wfs]
# Initialize new firework counter, starting from the next fw id
total_num_fws = sum(len(wf) for wf in wfs)
new_fw_counter = self.fw_id_assigner.find_one_and_update({}, {"$inc": {"next_fw_id": total_num_fws}})[
"next_fw_id"
]
for wf in tqdm(wfs):
# Reassign fw_ids and increment the counter
old_new = dict(zip(wf.id_fw.keys(), range(new_fw_counter, new_fw_counter + len(wf))))
for fw in wf:
fw.fw_id = old_new[fw.fw_id]
wf._reassign_ids(old_new)
new_fw_counter += len(wf)
# Set root fws to READY
for fw_id in wf.root_fw_ids:
wf.id_fw[fw_id].state = "READY"
wf.fw_states[fw_id] = "READY"
# Insert all fws and wfs, do workflows first so fws don't
# get checked out prematurely
self.workflows.insert_many(wf.to_db_dict() for wf in wfs)
all_fws = chain.from_iterable(wf for wf in wfs)
self.fireworks.insert_many(fw.to_db_dict() for fw in all_fws)
return None
def append_wf(self, new_wf, fw_ids, detour=False, pull_spec_mods=True):
"""
Append a new workflow on top of an existing workflow.
Args:
new_wf (Workflow): The new workflow to append
fw_ids ([int]): The parent fw_ids at which to append the workflow
detour (bool): Whether to connect the new Workflow in a "detour" style, i.e., move
original children of the parent fw_ids to the new_wf
pull_spec_mods (bool): Whether the new Workflow should pull the FWActions of the parent
fw_ids
"""
wf = self.get_wf_by_fw_id(fw_ids[0])
updated_ids = wf.append_wf(new_wf, fw_ids, detour=detour, pull_spec_mods=pull_spec_mods)
with WFLock(self, fw_ids[0]):
self._update_wf(wf, updated_ids)
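    # Illustrative append (editor's sketch): attach `new_wf` as a detour after fw 10,
    # so fw 10's original children run only after the appended workflow completes:
    #
    #     lp.append_wf(new_wf, [10], detour=True, pull_spec_mods=False)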
def get_launch_by_id(self, launch_id):
"""
Given a Launch id, return details of the Launch.
Args:
launch_id (int): launch id
Returns:
Launch object
"""
m_launch = self.launches.find_one({"launch_id": launch_id})
if m_launch:
m_launch["action"] = get_action_from_gridfs(m_launch.get("action"), self.gridfs_fallback)
return Launch.from_dict(m_launch)
raise ValueError(f"No Launch exists with launch_id: {launch_id}")
def get_fw_dict_by_id(self, fw_id):
"""
Given firework id, return firework dict.
Args:
fw_id (int): firework id
Returns:
dict
"""
fw_dict = self.fireworks.find_one({"fw_id": fw_id})
if not fw_dict:
raise ValueError(f"No Firework exists with id: {fw_id}")
# recreate launches from the launch collection
launches = list(
self.launches.find({"launch_id": {"$in": fw_dict["launches"]}}, sort=[("launch_id", ASCENDING)])
)
for l in launches:
l["action"] = get_action_from_gridfs(l.get("action"), self.gridfs_fallback)
fw_dict["launches"] = launches
launches = list(
self.launches.find({"launch_id": {"$in": fw_dict["archived_launches"]}}, sort=[("launch_id", ASCENDING)])
)
for l in launches:
l["action"] = get_action_from_gridfs(l.get("action"), self.gridfs_fallback)
fw_dict["archived_launches"] = launches
return fw_dict
def get_fw_by_id(self, fw_id):
"""
Given a Firework id, give back a Firework object.
Args:
fw_id (int): Firework id.
Returns:
Firework object
"""
return Firework.from_dict(self.get_fw_dict_by_id(fw_id))
def get_wf_by_fw_id(self, fw_id):
"""
Given a Firework id, give back the Workflow containing that Firework.
Args:
fw_id (int)
Returns:
A Workflow object
"""
links_dict = self.workflows.find_one({"nodes": fw_id})
if not links_dict:
raise ValueError(f"Could not find a Workflow with fw_id: {fw_id}")
fws = map(self.get_fw_by_id, links_dict["nodes"])
return Workflow(
fws,
links_dict["links"],
links_dict["name"],
links_dict["metadata"],
links_dict["created_on"],
links_dict["updated_on"],
)
def get_wf_by_fw_id_lzyfw(self, fw_id):
"""
Given a FireWork id, give back the Workflow containing that FireWork.
Args:
fw_id (int)
Returns:
A Workflow object
"""
links_dict = self.workflows.find_one({"nodes": fw_id})
if not links_dict:
raise ValueError(f"Could not find a Workflow with fw_id: {fw_id}")
fws = []
for fw_id in links_dict["nodes"]:
fws.append(LazyFirework(fw_id, self.fireworks, self.launches, self.gridfs_fallback))
# Check for fw_states in links_dict to conform with pre-optimized workflows
if "fw_states" in links_dict:
fw_states = {int(k): v for (k, v) in links_dict["fw_states"].items()}
else:
fw_states = None
return Workflow(
fws,
links_dict["links"],
links_dict["name"],
links_dict["metadata"],
links_dict["created_on"],
links_dict["updated_on"],
fw_states,
)
def delete_fws(self, fw_ids, delete_launch_dirs=False):
"""Delete a set of fireworks identified by their fw_ids.
ATTENTION: This function serves maintenance purposes and will leave
workflows untouched. Its use will thus result in a corrupted database.
Use 'delete_wf' instead for consistently deleting workflows together
        with their fireworks.
Args:
fw_ids ([int]): Firework ids
delete_launch_dirs (bool): if True all the launch directories associated with
the WF will be deleted as well, if possible.
"""
potential_launch_ids = []
launch_ids = []
for i in fw_ids:
fw_dict = self.fireworks.find_one({"fw_id": i})
potential_launch_ids += fw_dict["launches"] + fw_dict["archived_launches"]
for i in potential_launch_ids: # only remove launches if no other fws refer to them
if not self.fireworks.find_one(
{"$or": [{"launches": i}, {"archived_launches": i}], "fw_id": {"$nin": fw_ids}}, {"launch_id": 1}
):
launch_ids.append(i)
if delete_launch_dirs:
launch_dirs = []
for i in launch_ids:
launch_dirs.append(self.launches.find_one({"launch_id": i}, {"launch_dir": 1})["launch_dir"])
print(f"Remove folders {launch_dirs}")
for d in launch_dirs:
shutil.rmtree(d, ignore_errors=True)
print(f"Remove fws {fw_ids}")
if self.gridfs_fallback is not None:
for lid in launch_ids:
for f in self.gridfs_fallback.find({"metadata.launch_id": lid}):
self.gridfs_fallback.delete(f._id)
print(f"Remove launches {launch_ids}")
self.launches.delete_many({"launch_id": {"$in": launch_ids}})
self.offline_runs.delete_many({"launch_id": {"$in": launch_ids}})
self.fireworks.delete_many({"fw_id": {"$in": fw_ids}})
def delete_wf(self, fw_id, delete_launch_dirs=False):
"""
Delete the workflow containing firework with the given id.
Args:
fw_id (int): Firework id
delete_launch_dirs (bool): if True all the launch directories associated with
the WF will be deleted as well, if possible.
        """
links_dict = self.workflows.find_one({"nodes": fw_id})
fw_ids = links_dict["nodes"]
self.delete_fws(fw_ids, delete_launch_dirs=delete_launch_dirs)
print("Removing workflow.")
self.workflows.delete_one({"nodes": fw_id})
def get_wf_summary_dict(self, fw_id, mode="more"):
"""
A much faster way to get summary information about a Workflow by querying only for
needed information.
Args:
fw_id (int): A Firework id.
mode (str): Choose between "more", "less" and "all" in terms of quantity of information.
Returns:
dict: information about Workflow.
"""
wf_fields = ["state", "created_on", "name", "nodes"]
fw_fields = ["state", "fw_id"]
launch_fields = []
if mode != "less":
wf_fields.append("updated_on")
fw_fields.extend(["name", "launches"])
launch_fields.append("launch_id")
launch_fields.append("launch_dir")
if mode == "reservations":
launch_fields.append("state_history.reservation_id")
if mode == "all":
wf_fields = None
wf = self.workflows.find_one({"nodes": fw_id}, projection=wf_fields)
fw_data = []
id_name_map = {}
launch_ids = []
for fw in self.fireworks.find({"fw_id": {"$in": wf["nodes"]}}, projection=fw_fields):
if launch_fields:
launch_ids.extend(fw["launches"])
fw_data.append(fw)
if mode != "less":
id_name_map[fw["fw_id"]] = f"{fw['name']}--{int(fw['fw_id'])}"
if launch_fields:
launch_info = defaultdict(list)
for l in self.launches.find({"launch_id": {"$in": launch_ids}}, projection=launch_fields):
for i, fw in enumerate(fw_data):
if l["launch_id"] in fw["launches"]:
launch_info[i].append(l)
for k, v in launch_info.items():
fw_data[k]["launches"] = v
wf["fw"] = fw_data
# Post process the summary dict so that it "looks" better.
if mode == "less":
wf["states_list"] = "-".join(
[fw["state"][:3] if fw["state"].startswith("R") else fw["state"][0] for fw in wf["fw"]]
)
del wf["nodes"]
if mode == "more" or mode == "all":
wf["states"] = {}
wf["launch_dirs"] = {}
for fw in wf["fw"]:
k = f"{fw['name']}--{int(fw['fw_id'])}"
wf["states"][k] = fw["state"]
wf["launch_dirs"][k] = [l["launch_dir"] for l in fw["launches"]]
del wf["nodes"]
if mode == "all":
del wf["fw_states"]
wf["links"] = {id_name_map[int(k)]: [id_name_map[i] for i in v] for k, v in wf["links"].items()}
wf["parent_links"] = {
id_name_map[int(k)]: [id_name_map[i] for i in v] for k, v in wf["parent_links"].items()
}
if mode == "reservations":
wf["states"] = {}
wf["launches"] = {}
for fw in wf["fw"]:
k = f"{fw['name']}--{int(fw['fw_id'])}"
wf["states"][k] = fw["state"]
wf["launches"][k] = fw["launches"]
del wf["nodes"]
del wf["_id"]
del wf["fw"]
return wf
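    # Editor's sketch of the summary modes (fw_id 10 is hypothetical):
    #
    #     lp.get_wf_summary_dict(10, mode="less")   # compact: states_list string only
    #     lp.get_wf_summary_dict(10, mode="more")   # per-FW states and launch_dirs
    #     lp.get_wf_summary_dict(10, mode="all")    # full documents plus name-keyed links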
def get_fw_ids(self, query=None, sort=None, limit=0, count_only=False, launches_mode=False):
"""
Return all the fw ids that match a query.
Args:
query (dict): representing a Mongo query
sort [(str,str)]: sort argument in Pymongo format
limit (int): limit the results
count_only (bool): only return the count rather than explicit ids
launches_mode (bool): query the launches collection instead of fireworks
Returns:
list: list of firework ids matching the query
"""
coll = "launches" if launches_mode else "fireworks"
criteria = query if query else {}
if launches_mode:
lids = self._get_active_launch_ids()
criteria["launch_id"] = {"$in": lids}
if count_only:
if limit:
                raise ValueError("Cannot count_only and limit at the same time!")
aggregation = []
if criteria is not None:
aggregation.append({"$match": criteria})
if count_only:
aggregation.append({"$count": "count"})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = getattr(self, coll).aggregate(aggregation)
res = list(cursor)
return res[0]["count"] if len(res) > 0 else 0
if sort is not None:
aggregation.extend(sort_aggregation(sort))
aggregation.append({"$project": {"fw_id": True, "_id": False}})
if limit is not None and limit > 0:
aggregation.append({"$limit": limit})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = getattr(self, coll).aggregate(aggregation)
return [fw["fw_id"] for fw in cursor]
def get_wf_ids(self, query=None, sort=None, limit=0, count_only=False):
"""
        Return the first fw id of each workflow that matches a query.
Args:
query (dict): representing a Mongo query
sort [(str,str)]: sort argument in Pymongo format
limit (int): limit the results
count_only (bool): only return the count rather than explicit ids
Returns:
list: list of firework ids
"""
criteria = query if query else {}
aggregation = []
if criteria is not None:
aggregation.append({"$match": criteria})
if count_only:
aggregation.append({"$count": "count"})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = self.workflows.aggregate(aggregation)
res = list(cursor)
return res[0]["count"] if len(res) > 0 else 0
if sort is not None:
aggregation.extend(sort_aggregation(sort))
aggregation.append({"$project": {"nodes": True, "_id": False}})
if limit is not None and limit > 0:
aggregation.append({"$limit": limit})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = self.workflows.aggregate(aggregation)
return [fw["nodes"][0] for fw in cursor]
def get_fw_ids_in_wfs(
self, wf_query=None, fw_query=None, sort=None, limit=0, count_only=False, launches_mode=False
):
"""
Return all fw ids that match fw_query within workflows that match wf_query.
Args:
wf_query (dict): representing a Mongo query on workflows
fw_query (dict): representing a Mongo query on Fireworks
sort [(str,str)]: sort argument in Pymongo format
limit (int): limit the results
count_only (bool): only return the count rather than explicit ids
launches_mode (bool): query the launches collection instead of fireworks
Returns:
list: list of firework ids matching the query
"""
coll = "launches" if launches_mode else "fireworks"
if launches_mode:
lids = self._get_active_launch_ids()
if fw_query is None:
fw_query = {}
fw_query["launch_id"] = {"$in": lids}
if count_only:
if limit:
                raise ValueError("Cannot count_only and limit at the same time!")
aggregation = []
if wf_query is not None:
aggregation.append(
{"$match": wf_query},
)
aggregation.extend(
[
{"$project": {"nodes": True, "_id": False}},
{"$unwind": "$nodes"},
{
"$lookup": {
"from": coll, # fireworks or launches
"localField": "nodes",
"foreignField": "fw_id",
"as": "fireworks",
}
},
{"$project": {"fireworks": 1, "_id": 0}},
{"$unwind": "$fireworks"},
{"$replaceRoot": {"newRoot": "$fireworks"}},
]
)
if fw_query is not None:
aggregation.append({"$match": fw_query})
if count_only:
aggregation.append({"$count": "count"})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = self.workflows.aggregate(aggregation)
res = list(cursor)
return res[0]["count"] if len(res) > 0 else 0
if sort is not None:
aggregation.extend(sort_aggregation(sort))
aggregation.append({"$project": {"fw_id": True, "_id": False}})
if limit is not None and limit > 0:
aggregation.append({"$limit": limit})
self.m_logger.debug(f"Aggregation '{aggregation}'.")
cursor = self.workflows.aggregate(aggregation)
return [fw["fw_id"] for fw in cursor]
def run_exists(self, fworker=None):
"""
Checks to see if the database contains any FireWorks that are ready to run.
Returns:
bool: True if the database contains any FireWorks that are ready to run.
"""
q = fworker.query if fworker else {}
return bool(self._get_a_fw_to_run(query=q, checkout=False))
def future_run_exists(self, fworker=None):
"""Check if database has any current OR future Fireworks available
Returns:
bool: True if database has any ready or waiting Fireworks.
"""
if self.run_exists(fworker):
# check first to see if any are READY
return True
else:
# retrieve all [RUNNING/RESERVED] fireworks
q = fworker.query if fworker else {}
q.update({"state": {"$in": ["RUNNING", "RESERVED"]}})
active = self.get_fw_ids(q)
# then check if they have WAITING children
for fw_id in active:
children = self.get_wf_by_fw_id_lzyfw(fw_id).links[fw_id]
if any(self.get_fw_dict_by_id(i)["state"] == "WAITING" for i in children):
return True
# if we loop over all active and none have WAITING children
# there is no future work to do
return False
def tuneup(self, bkground=True):
"""
Database tuneup: build indexes
"""
self.m_logger.info("Performing db tune-up")
self.m_logger.debug("Updating indices...")
self.fireworks.create_index("fw_id", unique=True, background=bkground)
for f in ("state", "spec._category", "created_on", "updated_on", "name", "launches"):
self.fireworks.create_index(f, background=bkground)
self.launches.create_index("launch_id", unique=True, background=bkground)
self.launches.create_index("fw_id", background=bkground)
self.launches.create_index("state_history.reservation_id", background=bkground)
if GRIDFS_FALLBACK_COLLECTION is not None:
files_collection = self.db[f"{GRIDFS_FALLBACK_COLLECTION}.files"]
files_collection.create_index("metadata.launch_id", unique=True, background=bkground)
for f in ("state", "time_start", "time_end", "host", "ip", "fworker.name"):
self.launches.create_index(f, background=bkground)
for f in ("name", "created_on", "updated_on", "nodes"):
self.workflows.create_index(f, background=bkground)
for idx in self.user_indices:
self.fireworks.create_index(idx, background=bkground)
for idx in self.wf_user_indices:
self.workflows.create_index(idx, background=bkground)
# for frontend, which needs to sort on _id after querying on state
self.fireworks.create_index([("state", DESCENDING), ("_id", DESCENDING)], background=bkground)
self.fireworks.create_index(
[("state", DESCENDING), ("spec._priority", DESCENDING), ("created_on", DESCENDING)], background=bkground
)
self.fireworks.create_index(
[("state", DESCENDING), ("spec._priority", DESCENDING), ("created_on", ASCENDING)], background=bkground
)
self.workflows.create_index([("state", DESCENDING), ("_id", DESCENDING)], background=bkground)
if not bkground:
self.m_logger.debug("Compacting database...")
try:
self.db.command({"compact": "fireworks"})
self.db.command({"compact": "launches"})
except Exception:
self.m_logger.debug("Database compaction failed (not critical)")
def pause_fw(self, fw_id):
"""
        Given the firework id, pause the firework and refresh the workflow.
Args:
fw_id(int): firework id
"""
allowed_states = ["WAITING", "READY", "RESERVED"]
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id, "state": {"$in": allowed_states}},
{"$set": {"state": "PAUSED", "updated_on": datetime.datetime.utcnow()}},
)
if f:
self._refresh_wf(fw_id)
if not f:
self.m_logger.error(f"No pausable (WAITING,READY,RESERVED) Firework exists with fw_id: {fw_id}")
return f
def defuse_fw(self, fw_id, rerun_duplicates=True):
"""
Given the firework id, defuse the firework and refresh the workflow.
Args:
fw_id (int): firework id
rerun_duplicates (bool): if True, duplicate fireworks(ones with the same launch) are
marked for rerun and then defused.
"""
allowed_states = ["DEFUSED", "WAITING", "READY", "FIZZLED", "PAUSED"]
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id, "state": {"$in": allowed_states}},
{"$set": {"state": "DEFUSED", "updated_on": datetime.datetime.utcnow()}},
)
if f:
self._refresh_wf(fw_id)
if not f:
self.rerun_fw(fw_id, rerun_duplicates)
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id, "state": {"$in": allowed_states}},
{"$set": {"state": "DEFUSED", "updated_on": datetime.datetime.utcnow()}},
)
if f:
self._refresh_wf(fw_id)
return f
def reignite_fw(self, fw_id):
"""
Given the firework id, re-ignite(set state=WAITING) the defused firework.
Args:
fw_id (int): firework id
"""
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id, "state": "DEFUSED"},
{"$set": {"state": "WAITING", "updated_on": datetime.datetime.utcnow()}},
)
if f:
self._refresh_wf(fw_id)
return f
def resume_fw(self, fw_id):
"""
Given the firework id, resume (set state=WAITING) the paused firework.
Args:
fw_id (int): firework id
"""
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id, "state": "PAUSED"},
{"$set": {"state": "WAITING", "updated_on": datetime.datetime.utcnow()}},
)
if f:
self._refresh_wf(fw_id)
return f
def defuse_wf(self, fw_id, defuse_all_states=True):
"""
Defuse the workflow containing the given firework id.
Args:
fw_id (int): firework id
defuse_all_states (bool)
"""
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
for fw in wf:
if fw.state not in ["COMPLETED", "FIZZLED"] or defuse_all_states:
self.defuse_fw(fw.fw_id)
def pause_wf(self, fw_id):
"""
Pause the workflow containing the given firework id.
Args:
fw_id (int): firework id
"""
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
for fw in wf:
if fw.state not in ["COMPLETED", "FIZZLED", "DEFUSED"]:
self.pause_fw(fw.fw_id)
def reignite_wf(self, fw_id):
"""
Reignite the workflow containing the given firework id.
Args:
fw_id (int): firework id
"""
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
for fw in wf:
self.reignite_fw(fw.fw_id)
def archive_wf(self, fw_id):
"""
Archive the workflow containing the given firework id.
Args:
fw_id (int): firework id
"""
# first archive all the launches, so they are not used in duplicate checks
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
if wf.state != "ARCHIVED":
fw_ids = [f.fw_id for f in wf]
for fw_id in fw_ids:
self.rerun_fw(fw_id)
# second set the state of all FWs to ARCHIVED
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
for fw in wf:
self.fireworks.find_one_and_update(
{"fw_id": fw.fw_id}, {"$set": {"state": "ARCHIVED", "updated_on": datetime.datetime.utcnow()}}
)
self._refresh_wf(fw.fw_id)
def _restart_ids(self, next_fw_id, next_launch_id):
"""
internal method used to reset firework id counters.
Args:
next_fw_id (int): id to give next Firework
next_launch_id (int): id to give next Launch
"""
self.fw_id_assigner.delete_many({})
self.fw_id_assigner.find_one_and_replace(
{"_id": -1}, {"next_fw_id": next_fw_id, "next_launch_id": next_launch_id}, upsert=True
)
self.m_logger.debug(f"RESTARTED fw_id, launch_id to ({next_fw_id}, {next_launch_id})")
def _check_fw_for_uniqueness(self, m_fw):
"""
Check if there are duplicates. If not unique, a new id is assigned and the workflow
refreshed.
Args:
m_fw (Firework)
Returns:
bool: True if the firework is unique
"""
if not self._steal_launches(m_fw):
self.m_logger.debug(f"FW with id: {m_fw.fw_id} is unique!")
return True
self._upsert_fws([m_fw]) # update the DB with the new launches
self._refresh_wf(m_fw.fw_id) # since we updated a state, we need to refresh the WF again
return False
def _get_a_fw_to_run(self, query=None, fw_id=None, checkout=True):
"""
Get the next ready firework to run.
Args:
query (dict)
fw_id (int): If given the query is updated.
Note: We want to return None if this specific FW doesn't exist anymore. This is
because our queue params might have been tailored to this FW.
checkout (bool): if True, check out the matching firework and set state=RESERVED
Returns:
Firework
"""
m_query = dict(query) if query else {} # make a defensive copy
m_query["state"] = "READY"
sortby = [("spec._priority", DESCENDING)]
if SORT_FWS.upper() == "FIFO":
sortby.append(("created_on", ASCENDING))
elif SORT_FWS.upper() == "FILO":
sortby.append(("created_on", DESCENDING))
# Override query if fw_id defined
if fw_id:
m_query = {"fw_id": fw_id, "state": {"$in": ["READY", "RESERVED"]}}
while True:
# check out the matching firework, depending on the query set by the FWorker
if checkout:
m_fw = self.fireworks.find_one_and_update(
m_query, {"$set": {"state": "RESERVED", "updated_on": datetime.datetime.utcnow()}}, sort=sortby
)
else:
m_fw = self.fireworks.find_one(m_query, {"fw_id": 1, "spec": 1}, sort=sortby)
if not m_fw:
return None
m_fw = self.get_fw_by_id(m_fw["fw_id"])
if self._check_fw_for_uniqueness(m_fw):
return m_fw
def _get_active_launch_ids(self):
"""
        Get all launch ids referenced by the fireworks collection.
Returns:
list: all launch ids
"""
all_launch_ids = []
for l in self.fireworks.find({}, {"launches": 1}):
all_launch_ids.extend(l["launches"])
return all_launch_ids
def reserve_fw(self, fworker, launch_dir, host=None, ip=None, fw_id=None):
"""
Checkout the next ready firework and mark the launch reserved.
Args:
fworker (FWorker)
launch_dir (str): path to the launch directory.
host (str): hostname
ip (str): ip address
fw_id (int): fw_id to be reserved, if desired
Returns:
(Firework, int): the checked out firework and the new launch id
"""
return self.checkout_fw(fworker, launch_dir, host=host, ip=ip, fw_id=fw_id, state="RESERVED")
def get_fw_ids_from_reservation_id(self, reservation_id):
"""
Given the reservation id, return the list of firework ids.
Args:
reservation_id (int)
Returns:
[int]: list of firework ids.
"""
fw_ids = []
l_id = self.launches.find_one({"state_history.reservation_id": reservation_id}, {"launch_id": 1})["launch_id"]
for fw in self.fireworks.find({"launches": l_id}, {"fw_id": 1}):
fw_ids.append(fw["fw_id"])
return fw_ids
def cancel_reservation_by_reservation_id(self, reservation_id):
"""
Given the reservation id, cancel the reservation and rerun the corresponding fireworks.
"""
l_id = self.launches.find_one(
{"state_history.reservation_id": reservation_id, "state": "RESERVED"}, {"launch_id": 1}
)
if l_id:
self.cancel_reservation(l_id["launch_id"])
else:
self.m_logger.info(f"Can't find any reserved jobs with reservation id: {reservation_id}")
def get_reservation_id_from_fw_id(self, fw_id):
"""
Given the firework id, return the reservation id
"""
fw = self.fireworks.find_one({"fw_id": fw_id}, {"launches": 1})
if fw:
for l in self.launches.find({"launch_id": {"$in": fw["launches"]}}, {"state_history": 1}):
for d in l["state_history"]:
if "reservation_id" in d:
return d["reservation_id"]
def cancel_reservation(self, launch_id):
"""
        Given the launch id, cancel the reservation and rerun the corresponding fireworks.
"""
m_launch = self.get_launch_by_id(launch_id)
m_launch.state = "READY"
self.launches.find_one_and_replace(
{"launch_id": m_launch.launch_id, "state": "RESERVED"}, m_launch.to_db_dict(), upsert=True
)
for fw in self.fireworks.find({"launches": launch_id, "state": "RESERVED"}, {"fw_id": 1}):
self.rerun_fw(fw["fw_id"], rerun_duplicates=False)
def detect_unreserved(self, expiration_secs=RESERVATION_EXPIRATION_SECS, rerun=False):
"""
Return the reserved launch ids that have not been updated for a while.
Args:
expiration_secs (seconds): time limit
rerun (bool): if True, the expired reservations are cancelled and the fireworks rerun.
Returns:
[int]: list of expired launch ids
"""
bad_launch_ids = []
now_time = datetime.datetime.utcnow()
cutoff_timestr = (now_time - datetime.timedelta(seconds=expiration_secs)).isoformat()
bad_launch_data = self.launches.find(
{
"state": "RESERVED",
"state_history": {"$elemMatch": {"state": "RESERVED", "updated_on": {"$lte": cutoff_timestr}}},
},
{"launch_id": 1, "fw_id": 1},
)
for ld in bad_launch_data:
if self.fireworks.find_one({"fw_id": ld["fw_id"], "state": "RESERVED"}, {"fw_id": 1}):
bad_launch_ids.append(ld["launch_id"])
if rerun:
for lid in bad_launch_ids:
self.cancel_reservation(lid)
return bad_launch_ids
def mark_fizzled(self, launch_id):
"""
Mark the launch corresponding to the given id as FIZZLED.
Args:
launch_id (int): launch id
Returns:
dict: updated launch
"""
# Do a confirmed write and make sure state_history is preserved
self.complete_launch(launch_id, state="FIZZLED")
def detect_lostruns(
self,
expiration_secs=RUN_EXPIRATION_SECS,
fizzle=False,
rerun=False,
max_runtime=None,
min_runtime=None,
refresh=False,
query=None,
launch_query=None,
):
"""
        Detect lost runs, i.e. running fireworks that haven't been updated within the specified
        time limit, or running fireworks whose launch has been marked FIZZLED or COMPLETED.
Args:
expiration_secs (seconds): expiration time in seconds
            fizzle (bool): if True, mark the lost runs FIZZLED
            rerun (bool): if True, mark the lost runs FIZZLED and rerun
max_runtime (seconds): maximum run time
min_runtime (seconds): minimum run time
refresh (bool): if True, refresh the workflow with inconsistent fireworks.
query (dict): restrict search to FWs matching this query
launch_query (dict): restrict search to launches matching this query (e.g. host restriction)
Returns:
([int], [int], [int]): tuple of list of lost launch ids, lost firework ids and
inconsistent firework ids.
"""
lost_launch_ids = []
lost_fw_ids = []
potential_lost_fw_ids = []
now_time = datetime.datetime.utcnow()
cutoff_timestr = (now_time - datetime.timedelta(seconds=expiration_secs)).isoformat()
lostruns_query = launch_query or {}
lostruns_query["state"] = "RUNNING"
lostruns_query["state_history"] = {"$elemMatch": {"state": "RUNNING", "updated_on": {"$lte": cutoff_timestr}}}
if query:
fw_ids = [x["fw_id"] for x in self.fireworks.find(query, {"fw_id": 1})]
lostruns_query["fw_id"] = {"$in": fw_ids}
bad_launch_data = self.launches.find(lostruns_query, {"launch_id": 1, "fw_id": 1})
for ld in bad_launch_data:
bad_launch = True
if max_runtime or min_runtime:
bad_launch = False
m_l = self.get_launch_by_id(ld["launch_id"])
utime = m_l._get_time("RUNNING", use_update_time=True)
ctime = m_l._get_time("RUNNING", use_update_time=False)
if (not max_runtime or (utime - ctime).seconds <= max_runtime) and (
not min_runtime or (utime - ctime).seconds >= min_runtime
):
bad_launch = True
if bad_launch:
lost_launch_ids.append(ld["launch_id"])
potential_lost_fw_ids.append(ld["fw_id"])
for fw_id in potential_lost_fw_ids: # tricky: figure out what's actually lost
f = self.fireworks.find_one({"fw_id": fw_id}, {"launches": 1, "state": 1})
# only RUNNING FireWorks can be "lost", i.e. not defused or archived
if f["state"] == "RUNNING":
l_ids = f["launches"]
not_lost = [x for x in l_ids if x not in lost_launch_ids]
if len(not_lost) == 0: # all launches are lost - we are lost!
lost_fw_ids.append(fw_id)
else:
for l_id in not_lost:
l_state = self.launches.find_one({"launch_id": l_id}, {"state": 1})["state"]
if Firework.STATE_RANKS[l_state] > Firework.STATE_RANKS["FIZZLED"]:
break
else:
lost_fw_ids.append(fw_id) # all Launches not lost are anyway FIZZLED / ARCHIVED
if fizzle or rerun:
for lid in lost_launch_ids:
self.mark_fizzled(lid)
# for offline runs, you want to forget about the run
# see: https://groups.google.com/forum/#!topic/fireworkflows/oimFmE5tZ4E
offline_run = self.offline_runs.count_documents({"launch_id": lid, "deprecated": False}) > 0
if offline_run:
self.forget_offline(lid, launch_mode=True)
if rerun:
fw_id = self.launches.find_one({"launch_id": lid}, {"fw_id": 1})["fw_id"]
if fw_id in lost_fw_ids:
self.rerun_fw(fw_id)
inconsistent_fw_ids = []
inconsistent_query = query or {}
inconsistent_query["state"] = "RUNNING"
running_fws = self.fireworks.find(inconsistent_query, {"fw_id": 1, "launches": 1})
for fw in running_fws:
if self.launches.find_one(
{"launch_id": {"$in": fw["launches"]}, "state": {"$in": ["FIZZLED", "COMPLETED"]}}
):
inconsistent_fw_ids.append(fw["fw_id"])
if refresh:
self._refresh_wf(fw["fw_id"])
return lost_launch_ids, lost_fw_ids, inconsistent_fw_ids
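    # Editor's sketch of a maintenance-style call: mark runs idle beyond the expiration
    # window as FIZZLED, rerun the affected FWs, and refresh inconsistent workflows.
    #
    #     lost_launches, lost_fws, inconsistent = lp.detect_lostruns(
    #         expiration_secs=4 * 3600, fizzle=True, rerun=True, refresh=True)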
def set_reservation_id(self, launch_id, reservation_id):
"""
Set reservation id to the launch corresponding to the given launch id.
Args:
launch_id (int)
reservation_id (int)
"""
m_launch = self.get_launch_by_id(launch_id)
m_launch.set_reservation_id(reservation_id)
self.launches.find_one_and_replace({"launch_id": launch_id}, m_launch.to_db_dict())
def checkout_fw(self, fworker, launch_dir, fw_id=None, host=None, ip=None, state="RUNNING"):
"""
Checkout the next ready firework, mark it with the given state(RESERVED or RUNNING) and
return it to the caller. The caller is responsible for running the Firework.
Args:
fworker (FWorker): A FWorker instance
launch_dir (str): the dir the FW will be run in (for creating a Launch object)
fw_id (int): Firework id
host (str): the host making the request (for creating a Launch object)
ip (str): the ip making the request (for creating a Launch object)
state (str): RESERVED or RUNNING, the fetched firework's state will be set to this value.
Returns:
(Firework, int): firework and the new launch id
"""
m_fw = self._get_a_fw_to_run(fworker.query, fw_id=fw_id)
if not m_fw:
return None, None
# If this Launch was previously reserved, overwrite that reservation with this Launch
# note that adding a new Launch is problematic from a duplicate run standpoint
prev_reservations = [l for l in m_fw.launches if l.state == "RESERVED"]
reserved_launch = None if not prev_reservations else prev_reservations[0]
state_history = reserved_launch.state_history if reserved_launch else None
# get new launch
launch_id = reserved_launch.launch_id if reserved_launch else self.get_new_launch_id()
trackers = [Tracker.from_dict(f) for f in m_fw.spec["_trackers"]] if "_trackers" in m_fw.spec else None
m_launch = Launch(
state,
launch_dir,
fworker,
host,
ip,
trackers=trackers,
state_history=state_history,
launch_id=launch_id,
fw_id=m_fw.fw_id,
)
# insert the launch
self.launches.find_one_and_replace({"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)
self.m_logger.debug(f"Created/updated Launch with launch_id: {launch_id}")
# update the firework's launches
if not reserved_launch:
# we're appending a new Firework
m_fw.launches.append(m_launch)
else:
# we're updating an existing launch
m_fw.launches = [m_launch if l.launch_id == m_launch.launch_id else l for l in m_fw.launches]
# insert the firework and refresh the workflow
m_fw.state = state
self._upsert_fws([m_fw])
self._refresh_wf(m_fw.fw_id)
# update any duplicated runs
if state == "RUNNING":
for fw in self.fireworks.find(
{"launches": launch_id, "state": {"$in": ["WAITING", "READY", "RESERVED", "FIZZLED"]}}, {"fw_id": 1}
):
fw_id = fw["fw_id"]
fw = self.get_fw_by_id(fw_id)
fw.state = state
self._upsert_fws([fw])
self._refresh_wf(fw.fw_id)
# Store backup copies of the initial data for retrieval in case of failure
self.backup_launch_data[m_launch.launch_id] = m_launch.to_db_dict()
self.backup_fw_data[fw_id] = m_fw.to_db_dict()
self.m_logger.debug(f"{m_fw.state} FW with id: {m_fw.fw_id}")
return m_fw, launch_id
def change_launch_dir(self, launch_id, launch_dir):
"""
Change the launch directory corresponding to the given launch id.
Args:
launch_id (int)
launch_dir (str): path to the new launch directory.
"""
m_launch = self.get_launch_by_id(launch_id)
m_launch.launch_dir = launch_dir
self.launches.find_one_and_replace({"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)
def restore_backup_data(self, launch_id, fw_id):
"""
For the given launch id and firework id, restore the back up data.
"""
if launch_id in self.backup_launch_data:
self.launches.find_one_and_replace({"launch_id": launch_id}, self.backup_launch_data[launch_id])
if fw_id in self.backup_fw_data:
self.fireworks.find_one_and_replace({"fw_id": fw_id}, self.backup_fw_data[fw_id])
def complete_launch(self, launch_id, action=None, state="COMPLETED"):
"""
Internal method used to mark a Firework's Launch as completed.
Args:
launch_id (int)
action (FWAction): the FWAction of what to do next
state (str): COMPLETED or FIZZLED
Returns:
dict: updated launch
"""
# update the launch data to COMPLETED, set end time, etc
m_launch = self.get_launch_by_id(launch_id)
m_launch.state = state
if action:
m_launch.action = action
try:
self.launches.find_one_and_replace({"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)
except DocumentTooLarge as err:
launch_db_dict = m_launch.to_db_dict()
action_dict = launch_db_dict.get("action", None)
if not action_dict:
# in case the action is empty and it is not the source of
# the error, raise the exception again.
raise
if self.gridfs_fallback is None:
err.args = (
err.args[0] + ". Set GRIDFS_FALLBACK_COLLECTION in FW_config.yaml"
" to a value different from None",
)
raise err
# encoding required for python2/3 compatibility.
action_id = self.gridfs_fallback.put(
json.dumps(action_dict), encoding="utf-8", metadata={"launch_id": launch_id}
)
launch_db_dict["action"] = {"gridfs_id": str(action_id)}
self.m_logger.warning("The size of the launch document was too large. Saving the action in gridfs.")
self.launches.find_one_and_replace({"launch_id": m_launch.launch_id}, launch_db_dict, upsert=True)
# find all the fws that have this launch
for fw in self.fireworks.find({"launches": launch_id}, {"fw_id": 1}):
fw_id = fw["fw_id"]
self._refresh_wf(fw_id)
# change return type to dict to make return type serializable to support job packing
return m_launch.to_dict()
def ping_launch(self, launch_id, ptime=None, checkpoint=None):
"""
        Ping that a Launch is still alive: updates the 'updated_on' field of the state history of a
Launch.
Args:
launch_id (int)
ptime (datetime)
"""
m_launch = self.get_launch_by_id(launch_id)
for tracker in m_launch.trackers:
tracker.track_file(m_launch.launch_dir)
m_launch.touch_history(ptime, checkpoint=checkpoint)
self.launches.update_one(
{"launch_id": launch_id, "state": "RUNNING"},
{
"$set": {
"state_history": m_launch.to_db_dict()["state_history"],
"trackers": [t.to_dict() for t in m_launch.trackers],
}
},
)
def get_new_fw_id(self, quantity=1):
"""
Checkout the next Firework id
Args:
quantity (int): optionally ask for many ids, otherwise defaults to 1
this then returns the *first* fw_id in that range
"""
try:
return self.fw_id_assigner.find_one_and_update({}, {"$inc": {"next_fw_id": quantity}})["next_fw_id"]
except Exception:
raise ValueError(
"Could not get next FW id! If you have not yet initialized the database,"
" please do so by performing a database reset (e.g., lpad reset)"
)
def get_new_launch_id(self):
"""
Checkout the next Launch id
"""
try:
return self.fw_id_assigner.find_one_and_update({}, {"$inc": {"next_launch_id": 1}})["next_launch_id"]
except Exception:
raise ValueError(
"Could not get next launch id! If you have not yet initialized the "
"database, please do so by performing a database reset (e.g., lpad reset)"
)
def _upsert_fws(self, fws, reassign_all=False):
"""
Insert the fireworks to the 'fireworks' collection.
Args:
fws ([Firework]): list of fireworks
reassign_all (bool): if True, reassign the firework ids. The ids are also reassigned
if the current firework ids are negative.
Returns:
dict: mapping between old and new Firework ids
"""
old_new = {}
# sort the FWs by id, then the new FW_ids will match the order of the old ones...
fws.sort(key=lambda x: x.fw_id)
if reassign_all:
used_ids = []
# we can request multiple fw_ids up front
# this is the FIRST fw_id we should use
first_new_id = self.get_new_fw_id(quantity=len(fws))
for new_id, fw in enumerate(fws, start=first_new_id):
old_new[fw.fw_id] = new_id
fw.fw_id = new_id
used_ids.append(new_id)
# delete/add in bulk
self.fireworks.delete_many({"fw_id": {"$in": used_ids}})
self.fireworks.insert_many(fw.to_db_dict() for fw in fws)
else:
for fw in fws:
if fw.fw_id < 0:
new_id = self.get_new_fw_id()
old_new[fw.fw_id] = new_id
fw.fw_id = new_id
self.fireworks.find_one_and_replace({"fw_id": fw.fw_id}, fw.to_db_dict(), upsert=True)
return old_new
def rerun_fw(self, fw_id, rerun_duplicates=True, recover_launch=None, recover_mode=None):
"""
Rerun the firework corresponding to the given id.
Args:
fw_id (int): firework id
rerun_duplicates (bool): flag for whether duplicates should be rerun
recover_launch ('last' or int): launch_id for last recovery, if set to
'last' (default), recovery will find the last available launch.
If it is an int, will recover that specific launch
recover_mode ('prev_dir' or 'cp'): flag to indicate whether to copy
or run recovery fw in previous directory
Returns:
[int]: list of firework ids that were rerun
"""
m_fw = self.fireworks.find_one({"fw_id": fw_id}, {"state": 1})
if not m_fw:
raise ValueError(f"FW with id: {fw_id} not found!")
# detect FWs that share the same launch. Must do this before rerun
duplicates = []
reruns = []
if rerun_duplicates:
f = self.fireworks.find_one({"fw_id": fw_id, "spec._dupefinder": {"$exists": True}}, {"launches": 1})
if f:
for d in self.fireworks.find(
{"launches": {"$in": f["launches"]}, "fw_id": {"$ne": fw_id}}, {"fw_id": 1}
):
duplicates.append(d["fw_id"])
duplicates = list(set(duplicates))
# Launch recovery
if recover_launch is not None:
recovery = self.get_recovery(fw_id, recover_launch)
recovery.update({"_mode": recover_mode})
set_spec = recursive_dict({"$set": {"spec._recovery": recovery}})
if recover_mode == "prev_dir":
prev_dir = self.get_launch_by_id(recovery.get("_launch_id")).launch_dir
set_spec["$set"]["spec._launch_dir"] = prev_dir
self.fireworks.find_one_and_update({"fw_id": fw_id}, set_spec)
# If no launch recovery specified, unset the firework recovery spec
else:
set_spec = {"$unset": {"spec._recovery": ""}}
self.fireworks.find_one_and_update({"fw_id": fw_id}, set_spec)
# rerun this FW
if m_fw["state"] in ["ARCHIVED", "DEFUSED"]:
self.m_logger.info(f"Cannot rerun fw_id: {fw_id}: it is {m_fw['state']}.")
elif m_fw["state"] == "WAITING" and not recover_launch:
self.m_logger.debug(f"Skipping rerun fw_id: {fw_id}: it is already WAITING.")
else:
with WFLock(self, fw_id):
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
updated_ids = wf.rerun_fw(fw_id)
self._update_wf(wf, updated_ids)
reruns.append(fw_id)
# rerun duplicated FWs
for f in duplicates:
self.m_logger.info(f"Also rerunning duplicate fw_id: {f}")
# False for speed, True shouldn't be needed
r = self.rerun_fw(f, rerun_duplicates=False, recover_launch=recover_launch, recover_mode=recover_mode)
reruns.extend(r)
return reruns
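    # Illustrative rerun with recovery (editor's sketch; fw_id 42 is hypothetical):
    # restart the FW from its last checkpoint, running in the previous launch directory.
    #
    #     lp.rerun_fw(42, recover_launch="last", recover_mode="prev_dir")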
def get_recovery(self, fw_id, launch_id="last"):
"""
        Get recovery data for a given fw and launch.
Args:
fw_id (int): fw id to get recovery data for
launch_id (int or 'last'): launch_id to get recovery data for, if 'last'
recovery data is generated from last launch
"""
m_fw = self.get_fw_by_id(fw_id)
if launch_id == "last":
launch = m_fw.launches[-1]
else:
launch = self.get_launch_by_id(launch_id)
recovery = launch.state_history[-1].get("checkpoint")
recovery.update({"_prev_dir": launch.launch_dir, "_launch_id": launch.launch_id})
return recovery
def _refresh_wf(self, fw_id):
"""
Update the FW state of all jobs in workflow.
Args:
fw_id (int): the parent fw_id - children will be refreshed
"""
# TODO: time how long it took to refresh the WF!
# TODO: need a try-except here, high probability of failure if incorrect action supplied
try:
with WFLock(self, fw_id):
wf = self.get_wf_by_fw_id_lzyfw(fw_id)
updated_ids = wf.refresh(fw_id)
self._update_wf(wf, updated_ids)
except LockedWorkflowError:
self.m_logger.info(f"fw_id {fw_id} locked. Can't refresh!")
except Exception:
# some kind of internal error - an example is that fws serialization changed due to
# code updates and thus the Firework object can no longer be loaded from db description
# Action: *manually* mark the fw and workflow as FIZZLED
self.fireworks.find_one_and_update({"fw_id": fw_id}, {"$set": {"state": "FIZZLED"}})
self.workflows.find_one_and_update({"nodes": fw_id}, {"$set": {"state": "FIZZLED"}})
self.workflows.find_one_and_update({"nodes": fw_id}, {"$set": {f"fw_states.{fw_id}": "FIZZLED"}})
import traceback
err_message = f"Error refreshing workflow. The full stack trace is: {traceback.format_exc()}"
raise RuntimeError(err_message)
def _update_wf(self, wf, updated_ids):
"""
Update the workflow with the update firework ids.
Note: must be called within an enclosing WFLock
Args:
wf (Workflow)
updated_ids ([int]): list of firework ids
"""
updated_fws = [wf.id_fw[fid] for fid in updated_ids]
old_new = self._upsert_fws(updated_fws)
wf._reassign_ids(old_new)
# find a node for which the id did not change, so we can query on it to get WF
query_node = None
for f in wf.id_fw:
if f not in old_new.values() or old_new.get(f, None) == f:
query_node = f
break
assert query_node is not None
if not self.workflows.find_one({"nodes": query_node}):
raise ValueError(f"BAD QUERY_NODE! {query_node}")
# redo the links and fw_states
wf = wf.to_db_dict()
wf["locked"] = True # preserve the lock!
self.workflows.find_one_and_replace({"nodes": query_node}, wf)
def _steal_launches(self, thief_fw):
"""
Check if there are duplicates. If there are duplicates, the matching firework's launches
are added to the launches of the given firework.
Returns:
bool: False if the given firework is unique
"""
stolen = False
if thief_fw.state in ["READY", "RESERVED"] and "_dupefinder" in thief_fw.spec:
m_dupefinder = thief_fw.spec["_dupefinder"]
# get the query that will limit the number of results to check as duplicates
m_query = m_dupefinder.query(thief_fw.to_dict()["spec"])
self.m_logger.debug(f"Querying for duplicates, fw_id: {thief_fw.fw_id}")
# iterate through all potential duplicates in the DB
for potential_match in self.fireworks.find(m_query):
self.m_logger.debug(f"Verifying for duplicates, fw_ids: {thief_fw.fw_id}, {potential_match['fw_id']}")
# see if verification is needed, as this slows the process
verified = False
try:
m_dupefinder.verify({}, {}) # is implemented test
except NotImplementedError:
verified = True # no dupefinder.verify() implemented, skip verification
except Exception:
# we want to catch any exceptions from testing an empty dict, which the dupefinder might not be
# designed for
pass
if not verified:
# dupefinder.verify() is implemented, let's call verify()
spec1 = dict(thief_fw.to_dict()["spec"]) # defensive copy
spec2 = dict(potential_match["spec"]) # defensive copy
verified = m_dupefinder.verify(spec1, spec2)
if verified:
# steal the launches
victim_fw = self.get_fw_by_id(potential_match["fw_id"])
thief_launches = [l.launch_id for l in thief_fw.launches]
valuable_launches = [l for l in victim_fw.launches if l.launch_id not in thief_launches]
for launch in valuable_launches:
thief_fw.launches.append(launch)
stolen = True
self.m_logger.info(f"Duplicate found! fwids {thief_fw.fw_id} and {potential_match['fw_id']}")
return stolen
def set_priority(self, fw_id, priority):
"""
Set priority to the firework with the given id.
Args:
fw_id (int): firework id
priority
"""
self.fireworks.find_one_and_update({"fw_id": fw_id}, {"$set": {"spec._priority": priority}})
def get_logdir(self):
"""
Return the log directory.
AJ: This is needed for job packing due to Proxy objects not being fully featured...
"""
return self.logdir
def add_offline_run(self, launch_id, fw_id, name):
"""
        Add the launch and firework to the offline_runs collection.
Args:
launch_id (int): launch id
            fw_id (int): firework id
name (str)
"""
d = {"fw_id": fw_id}
d["launch_id"] = launch_id
d["name"] = name
d["created_on"] = datetime.datetime.utcnow().isoformat()
d["updated_on"] = datetime.datetime.utcnow().isoformat()
d["deprecated"] = False
d["completed"] = False
self.offline_runs.insert_one(d)
def recover_offline(self, launch_id, ignore_errors=False, print_errors=False):
"""
Update the launch state using the offline data in FW_offline.json file.
Args:
launch_id (int): launch id
ignore_errors (bool)
print_errors (bool)
Returns:
            the firework id if recovery fails, otherwise None
"""
# get the launch directory
m_launch = self.get_launch_by_id(launch_id)
try:
self.m_logger.debug(f"RECOVERING fw_id: {m_launch.fw_id}")
offline_loc = zpath(os.path.join(m_launch.launch_dir, "FW_offline.json"))
offline_data = loadfn(offline_loc)
if "started_on" in offline_data: # started running at some point
already_running = False
for s in m_launch.state_history:
if s["state"] == "RUNNING":
s["created_on"] = reconstitute_dates(offline_data["started_on"])
already_running = True
if not already_running:
m_launch.state = "RUNNING" # this should also add a history item
checkpoint = offline_data["checkpoint"] if "checkpoint" in offline_data else None
# look for ping file - update the Firework if this is the case
ping_loc = os.path.join(m_launch.launch_dir, "FW_ping.json")
if os.path.exists(ping_loc):
ping_dict = loadfn(ping_loc)
self.ping_launch(launch_id, ptime=ping_dict["ping_time"], checkpoint=checkpoint)
else:
warnings.warn(
f"Unable to find FW_ping.json in {m_launch.launch_dir}! State history updated_on might be "
"incorrect, trackers may not update."
)
m_launch.touch_history(checkpoint=checkpoint)
if "fwaction" in offline_data:
fwaction = FWAction.from_dict(offline_data["fwaction"])
m_launch.state = offline_data["state"]
self.launches.find_one_and_replace(
{"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True
)
m_launch = Launch.from_dict(self.complete_launch(launch_id, fwaction, m_launch.state))
for s in m_launch.state_history:
if s["state"] == offline_data["state"]:
s["created_on"] = reconstitute_dates(offline_data["completed_on"])
self.launches.find_one_and_update(
{"launch_id": m_launch.launch_id}, {"$set": {"state_history": m_launch.state_history}}
)
self.offline_runs.update_one({"launch_id": launch_id}, {"$set": {"completed": True}})
else:
l = self.launches.find_one_and_replace(
{"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True
)
fw_id = l["fw_id"]
f = self.fireworks.find_one_and_update(
{"fw_id": fw_id}, {"$set": {"state": "RUNNING", "updated_on": datetime.datetime.utcnow()}}
)
if f:
self._refresh_wf(fw_id)
# update the updated_on
self.offline_runs.update_one(
{"launch_id": launch_id}, {"$set": {"updated_on": datetime.datetime.utcnow().isoformat()}}
)
return None
except Exception:
if print_errors:
self.m_logger.error(f"failed recovering launch_id {launch_id}.\n{traceback.format_exc()}")
if not ignore_errors:
traceback.print_exc()
m_action = FWAction(
stored_data={
"_message": "runtime error during task",
"_task": None,
"_exception": {"_stacktrace": traceback.format_exc(), "_details": None},
},
exit=True,
)
self.complete_launch(launch_id, m_action, "FIZZLED")
self.offline_runs.update_one({"launch_id": launch_id}, {"$set": {"completed": True}})
return m_launch.fw_id
def forget_offline(self, launchid_or_fwid, launch_mode=True):
"""
Unmark the offline run for the given launch or firework id.
Args:
            launchid_or_fwid (int): launch id or firework id
launch_mode (bool): if True then launch id is given.
"""
q = {"launch_id": launchid_or_fwid} if launch_mode else {"fw_id": launchid_or_fwid}
self.offline_runs.update_many(q, {"$set": {"deprecated": True}})
def get_tracker_data(self, fw_id):
"""
Args:
            fw_id (int): firework id
Returns:
            [dict]: list of tracker dicts, one entry per launch
"""
data = []
for l in self.launches.find({"fw_id": fw_id}, {"trackers": 1, "launch_id": 1}):
if "trackers" in l: # backwards compatibility
trackers = [Tracker.from_dict(t) for t in l["trackers"]]
data.append({"launch_id": l["launch_id"], "trackers": trackers})
return data
def get_launchdir(self, fw_id, launch_idx=-1):
"""
        Returns the launch directory of the requested launch of a fw_id (the most recent launch by default).
Args:
fw_id: (int) fw_id to get launch id for
launch_idx: (int) index of the launch to get. Default is -1, which is most recent.
"""
fw = self.get_fw_by_id(fw_id)
return fw.launches[launch_idx].launch_dir if len(fw.launches) > 0 else None
def log_message(self, level, message):
"""
Support for job packing
Args:
level (str)
message (str)
"""
self.m_logger.log(level, message)
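# --- Illustrative sketch (not part of the original FireWorks source) ---------
# The offline-run helpers above (add_offline_run / recover_offline /
# forget_offline) are typically driven by a maintenance loop over the
# offline_runs collection. The function below is a hedged sketch of such a
# loop; `lp` is an assumed, already-configured LaunchPad instance and only the
# public methods and attributes defined above are used.
def _example_recover_pending_offline_runs(lp):
    """Sketch: recover every offline run that is neither completed nor
    deprecated, and collect the firework ids whose recovery failed."""
    failed_fw_ids = []
    query = {"completed": False, "deprecated": False}
    for doc in lp.offline_runs.find(query, {"launch_id": 1}):
        # recover_offline() returns the fw_id on failure and None on success
        fw_id = lp.recover_offline(doc["launch_id"], ignore_errors=True)
        if fw_id is not None:
            failed_fw_ids.append(fw_id)
    return failed_fw_ids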
class LazyFirework:
"""
A LazyFirework only has the fw_id, and retrieves other data just-in-time.
This representation can speed up Workflow loading as only "important" FWs need to be
fully loaded.
"""
# Get these fields from DB when creating new FireWork object
db_fields = ("name", "fw_id", "spec", "created_on", "state")
db_launch_fields = ("launches", "archived_launches")
def __init__(self, fw_id, fw_coll, launch_coll, fallback_fs):
"""
Args:
fw_id (int): firework id
fw_coll (pymongo.collection): fireworks collection
            launch_coll (pymongo.collection): launches collection
            fallback_fs (gridfs.GridFS): fallback GridFS store for FWActions exceeding the 16MB limit
"""
# This is the only attribute known w/o a DB query
self.fw_id = fw_id
self._fwc, self._lc, self._ffs = fw_coll, launch_coll, fallback_fs
self._launches = {k: False for k in self.db_launch_fields}
self._fw, self._lids, self._state = None, None, None
# FireWork methods
# Treat state as special case as it is always required when accessing a Firework lazily
# If the partial fw is not available the state is fetched independently
@property
def state(self):
if self._fw is not None:
self._state = self._fw.state
elif self._state is None:
self._state = self._fwc.find_one({"fw_id": self.fw_id}, projection=["state"])["state"]
return self._state
@state.setter
def state(self, state):
self.partial_fw._state = state
self.partial_fw.updated_on = datetime.datetime.utcnow()
def to_dict(self):
return self.full_fw.to_dict()
def _rerun(self):
self.full_fw._rerun()
def to_db_dict(self):
return self.full_fw.to_db_dict()
def __str__(self):
return f"LazyFireWork object: (id: {self.fw_id})"
# Properties that shadow FireWork attributes
@property
def tasks(self):
return self.partial_fw.tasks
@tasks.setter
def tasks(self, value):
self.partial_fw.tasks = value
@property
def spec(self):
return self.partial_fw.spec
@spec.setter
def spec(self, value):
self.partial_fw.spec = value
@property
def name(self):
return self.partial_fw.name
@name.setter
def name(self, value):
self.partial_fw.name = value
@property
def created_on(self):
return self.partial_fw.created_on
@created_on.setter
def created_on(self, value):
self.partial_fw.created_on = value
@property
def updated_on(self):
return self.partial_fw.updated_on
@updated_on.setter
def updated_on(self, value):
self.partial_fw.updated_on = value
@property
def parents(self):
if self._fw is not None:
return self.partial_fw.parents
else:
return []
@parents.setter
def parents(self, value):
self.partial_fw.parents = value
# Properties that shadow FireWork attributes, but which are
# fetched individually from the DB (i.e. launch objects)
@property
def launches(self):
return self._get_launch_data("launches")
@launches.setter
def launches(self, value):
self._launches["launches"] = True
self.partial_fw.launches = value
@property
def archived_launches(self):
return self._get_launch_data("archived_launches")
@archived_launches.setter
def archived_launches(self, value):
self._launches["archived_launches"] = True
self.partial_fw.archived_launches = value
# Lazy properties that idempotently instantiate a FireWork object
@property
def partial_fw(self):
if not self._fw:
fields = list(self.db_fields) + list(self.db_launch_fields)
data = self._fwc.find_one({"fw_id": self.fw_id}, projection=fields)
launch_data = {} # move some data to separate launch dict
for key in self.db_launch_fields:
launch_data[key] = data[key]
del data[key]
self._lids = launch_data
self._fw = Firework.from_dict(data)
return self._fw
@property
def full_fw(self):
# map(self._get_launch_data, self.db_launch_fields)
for launch_field in self.db_launch_fields:
self._get_launch_data(launch_field)
return self._fw
# Get a type of Launch object
def _get_launch_data(self, name):
"""
Pull launch data individually for each field.
Args:
name (str): Name of field, e.g. 'archived_launches'.
Returns:
Launch obj (also propagated to self._fw)
"""
fw = self.partial_fw # assure stage 1
if not self._launches[name]:
launch_ids = self._lids[name]
result = []
if launch_ids:
data = self._lc.find({"launch_id": {"$in": launch_ids}})
for ld in data:
ld["action"] = get_action_from_gridfs(ld.get("action"), self._ffs)
result.append(Launch.from_dict(ld))
setattr(fw, name, result) # put into real FireWork obj
self._launches[name] = True
return getattr(fw, name)
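# --- Illustrative sketch (not part of the original FireWorks source) ---------
# LazyFirework defers database work: only fw_id is known at construction time,
# the partial document is pulled on first attribute access, and launches are
# fetched per field. The sketch below shows the intended access pattern; `db`
# is an assumed pymongo database containing "fireworks" and "launches"
# collections, and `fallback_fs` an assumed GridFS used for oversized FWActions.
def _example_lazy_firework_access(db, fallback_fs, fw_id):
    lazy = LazyFirework(fw_id, db.fireworks, db.launches, fallback_fs)
    state = lazy.state        # cheap query that only projects the "state" field
    name = lazy.name          # first full attribute access loads the partial Firework
    launches = lazy.launches  # separate query that materializes Launch objects
    return state, name, len(launches)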
def get_action_from_gridfs(action_dict, fallback_fs):
"""
Helper function to obtain the correct dictionary of the FWAction associated
    with a launch. If necessary it retrieves the information from GridFS based
    on its identifier; otherwise it simply returns the input dictionary.
Should be used when accessing a launch to ensure the presence of the
correct action dictionary.
Args:
action_dict (dict): the dictionary contained in the "action" key of a launch
document.
fallback_fs (GridFS): the GridFS with the actions exceeding the 16MB limit.
Returns:
dict: the dictionary of the action.
"""
if not action_dict or "gridfs_id" not in action_dict:
return action_dict
action_gridfs_id = ObjectId(action_dict["gridfs_id"])
    action_data = fallback_fs.get(action_gridfs_id)
return json.loads(action_data.read()) | PypiClean |
/NebulasSdkPy-0.4.0.tar.gz/NebulasSdkPy-0.4.0/nebpysdk/src/proto/generated/google/protobuf/internal/wire_format.py | __author__ = '[email protected] (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
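# --- Illustrative sketch (not part of the original protobuf source) ----------
# A tag stores the field number in the high bits and the wire type in the low
# TAG_TYPE_BITS bits. The round trip below makes that layout explicit using
# only the helpers defined above.
def _example_tag_roundtrip():
    tag = PackTag(5, WIRETYPE_LENGTH_DELIMITED)  # (5 << 3) | 2 == 42
    assert tag == 42
    assert UnpackTag(tag) == (5, WIRETYPE_LENGTH_DELIMITED)
    return tag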
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
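# --- Illustrative sketch (not part of the original protobuf source) ----------
# ZigZag maps signed integers of small magnitude onto small unsigned values so
# that they stay short under varint encoding: 0->0, -1->1, 1->2, -2->3, ...
# The check below round-trips a few values through the functions above.
def _example_zigzag_roundtrip():
    expected = {0: 0, -1: 1, 1: 2, -2: 3, 2: 4}
    for signed_value, encoded in expected.items():
        assert ZigZagEncode(signed_value) == encoded
        assert ZigZagDecode(encoded) == signed_value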
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group, that
# is field number 1, one with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
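# --- Illustrative sketch (not part of the original protobuf source) ----------
# Each varint byte carries 7 payload bits, so the boundaries above correspond
# to 2**7 - 1, 2**14 - 1, 2**21 - 1, and so on. A few spot checks against the
# helper defined above:
def _example_varint_sizes():
    assert _VarUInt64ByteSizeNoTag(127) == 1          # 2**7 - 1 fits in one byte
    assert _VarUInt64ByteSizeNoTag(128) == 2
    assert _VarUInt64ByteSizeNoTag(2 ** 14 - 1) == 2
    assert _VarUInt64ByteSizeNoTag(2 ** 14) == 3
    assert _VarUInt64ByteSizeNoTag(2 ** 63) == 10     # values with the top bit set need 10 bytes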
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES | PypiClean |
/KiKit-1.3.0-py3-none-any.whl/kikit/common.py | from __future__ import annotations
from typing import List, Optional, Tuple, Union, Callable
from kikit.defs import Layer
from kikit.typing import Box
from pcbnewTransition import pcbnew, isV7
from kikit.intervals import AxialLine
from pcbnewTransition.pcbnew import BOX2I, VECTOR2I, EDA_ANGLE
import os
from itertools import product, chain, islice
import numpy as np
from shapely.geometry import LinearRing
import shapely.geometry
PKG_BASE = os.path.dirname(__file__)
KIKIT_LIB = os.path.join(PKG_BASE, "resources/kikit.pretty")
SHP_EPSILON = pcbnew.FromMM(0.001) # Common factor of enlarging substrates to
# cover up numerical imprecisions of Shapely
KiLength = int
KiAngle = EDA_ANGLE
KiPoint = VECTOR2I
def fromDegrees(angle: Union[float,int]) -> KiAngle:
"""Convert angle in degrees to Kicad angle representation"""
return EDA_ANGLE(angle, pcbnew.DEGREES_T)
def fromMm(mm: float) -> KiLength:
"""Convert millimeters to KiCAD internal units"""
return pcbnew.FromMM(mm)
def toMm(kiUnits: KiLength) -> float:
"""Convert KiCAD internal units to millimeters"""
return pcbnew.ToMM(int(kiUnits))
def toKiCADPoint(p) -> KiPoint:
"""Convert tuple or array like objects to KiCAD point (VECTOR2I)"""
assert len(p) == 2
return VECTOR2I(*[int(x) for x in p])
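# --- Illustrative sketch (not part of the original KiKit source) -------------
# KiCAD stores lengths as integer nanometres, so 1 mm corresponds to 1000000
# internal units. The sketch below round-trips a length and builds a native
# point from a plain tuple; it assumes a working pcbnew installation, which
# this module already requires.
def _example_unit_conversion():
    one_mm = fromMm(1)
    assert toMm(one_mm) == 1.0
    origin = toKiCADPoint((fromMm(10), fromMm(5)))
    return one_mm, origin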
def fitsIn(what: Union[BOX2I, VECTOR2I], where: BOX2I) -> bool:
"""
Return true iff 'what' (BOX2I or VECTOR2I) is fully contained in 'where'
(BOX2I)
"""
if isV7():
assert isinstance(what, (BOX2I, VECTOR2I, pcbnew.wxPoint))
else:
assert isinstance(what, (BOX2I, VECTOR2I, pcbnew.wxPoint, pcbnew.EDA_RECT))
    if isinstance(what, (VECTOR2I, pcbnew.wxPoint)):
return (what[0] >= where.GetX() and
what[0] <= where.GetX() + where.GetWidth() and
what[1] >= where.GetY() and
what[1] <= where.GetY() + where.GetHeight())
else:
return (what.GetX() >= where.GetX() and
what.GetX() + what.GetWidth() <= where.GetX() + where.GetWidth() and
what.GetY() >= where.GetY() and
what.GetY() + what.GetHeight() <= where.GetY() + where.GetHeight())
def combineBoundingBoxes(a, b):
""" Retrun BOX2I as a combination of source bounding boxes """
x1 = min(a.GetX(), b.GetX())
y1 = min(a.GetY(), b.GetY())
x2 = max(a.GetX() + a.GetWidth(), b.GetX() + b.GetWidth())
y2 = max(a.GetY() + a.GetHeight(), b.GetY() + b.GetHeight())
return BOX2I(toKiCADPoint((x1, y1)), toKiCADPoint((x2 - x1, y2 - y1)))
def collectEdges(board, layerId, sourceArea=None):
""" Collect edges in sourceArea on given layer including footprints """
edges = []
for edge in chain(board.GetDrawings(), *[m.GraphicalItems() for m in board.GetFootprints()]):
if edge.GetLayer() != layerId:
continue
if isinstance(edge, pcbnew.PCB_DIMENSION_BASE):
continue
if not sourceArea or fitsIn(edge.GetBoundingBox(), sourceArea):
edges.append(edge)
return edges
def collectItems(boardCollection, sourceArea):
""" Returns a list of board items fully contained in the source area """
return list([x for x in boardCollection if fitsIn(x.GetBoundingBox(), sourceArea)])
def collectFootprints(boardCollection, sourceArea):
"""
    Returns a list of board footprints whose origin fits inside the source area.
"""
return list([x for x in boardCollection if fitsIn(x.GetPosition(), sourceArea)])
def getBBoxWithoutContours(edge):
width = edge.GetWidth()
edge.SetWidth(0)
bBox = edge.GetBoundingBox()
edge.SetWidth(width)
return bBox
def listGeometries(shapelyObject):
"""
Given a shapely object, return an iterable of all geometries. I.e., for
single items, return an iterable containing only the original item. For
collections, return iterable of all the geometries in it.
"""
if hasattr(shapelyObject, 'geoms'):
return shapelyObject.geoms
return [shapelyObject]
def findBoundingBox(edges):
"""
Return a bounding box of all drawings in edges
"""
if len(edges) == 0:
raise RuntimeError("No board edges found")
boundingBox = getBBoxWithoutContours(edges[0])
for edge in edges[1:]:
boundingBox = combineBoundingBoxes(boundingBox, getBBoxWithoutContours(edge))
return boundingBox
def findBoardBoundingBox(board, sourceArea=None):
"""
Returns a bounding box (BOX2I) of all Edge.Cuts items either in
specified source area (BOX2I) or in the whole board
"""
edges = collectEdges(board, Layer.Edge_Cuts, sourceArea)
return findBoundingBox(edges)
def rectCenter(rect):
"""
Given a BOX2I return its center
"""
return toKiCADPoint((rect.GetX() + rect.GetWidth() // 2, rect.GetY() + rect.GetHeight() // 2))
def rectByCenter(center, width, height):
"""
Given a center point and size, return BOX2I
"""
return BOX2I(
toKiCADPoint((center[0] - width // 2, center[1] - height // 2)),
toKiCADPoint((width, height)))
def normalize(vector):
""" Return a vector with unit length """
vec = np.array([vector[0], vector[1]])
return vec / np.linalg.norm(vector)
def makePerpendicular(vector):
"""
Given a 2D vector, return a vector which is perpendicular to the input one
"""
return np.array([vector[1], -vector[0]])
def linestringToSegments(linestring):
"""
Given a Shapely linestring, return a list of tuples with start and endpoint
of the segment
"""
return [x for x in zip(linestring.coords, islice(linestring.coords, 1, None))]
def tl(rect):
""" Return top left corner of rect """
return toKiCADPoint((rect.GetX(), rect.GetY()))
def tr(rect):
""" Return top right corner of rect """
return toKiCADPoint((rect.GetX() + rect.GetWidth(), rect.GetY()))
def br(rect):
""" Return bottom right corner of rect """
return toKiCADPoint((rect.GetX() + rect.GetWidth(), rect.GetY() + rect.GetHeight()))
def bl(rect):
""" Return bottom left corner of rect """
return toKiCADPoint((rect.GetX(), rect.GetY() + rect.GetHeight()))
def removeComponents(board, references):
"""
Remove components with references from the board. References is a list of
strings
"""
for footprint in board.GetFootprints():
if footprint.GetReference() in references:
board.Remove(footprint)
def parseReferences(dStr):
"""
Parse comma separated list of component references to a list
"""
return [x.strip() for x in dStr.split(",") if len(x.strip()) > 0]
def shpBBoxLeft(bbox):
"""
Given a shapely bounding box, return left edge as (pos, interval)
"""
return AxialLine(bbox[0], bbox[1], bbox[3])
def shpBBoxRight(bbox):
"""
Given a shapely bounding box, return right edge as (pos, interval)
"""
return AxialLine(bbox[2], bbox[1], bbox[3])
def shpBBoxTop(bbox):
"""
Given a shapely bounding box, return top edge as (pos, interval)
"""
return AxialLine(bbox[1], bbox[0], bbox[2])
def shpBBoxBottom(bbox):
"""
Given a shapely bounding box, return bottom edge as (pos, interval)
"""
return AxialLine(bbox[3], bbox[0], bbox[2])
def shpBBoxMerge(a: Box, b: Box) -> Box:
"""
Given two shapely bounding boxes, return smallest bounding box where both
can fit.
"""
return (
min(a[0], b[0]),
min(a[1], b[1]),
max(a[2], b[2]),
max(a[3], b[3])
)
def shpBBoxExpand(box: Box, x: float, y: Optional[float]=None) -> Box:
"""
Given a shapely bounding box, return new one expanded by given amount. If y
    is not supplied, it is the same as x.
"""
if y is None:
y = x
return (box[0] - x, box[1] - y, box[2] + x, box[3] + y)
def shpBoxToRect(box):
box = list([int(x) for x in box])
return BOX2I(toKiCADPoint((box[0], box[1])),
toKiCADPoint((box[2] - box[0], box[3] - box[1])))
def rectToShpBox(rect):
return shapely.geometry.box(rect.GetX(), rect.GetY(),
rect.GetX() + rect.GetWidth(), rect.GetY() + rect.GetHeight())
def isLinestringCyclic(line):
c = line.coords
return c[0] == c[-1] or isinstance(line, LinearRing)
def fromOpt(object, default):
"""
Given an object, return it if not None. Otherwise return default
"""
return object if object is not None else default
def isBottomLayer(layer):
"""
Decide if layer is a bottom layer
"""
return str(layer).startswith("Layer.B_")
def commonPoints(lines):
"""
    Given a list of lines, return a dictionary mapping vertex -> count, where
    count specifies how many lines share the vertex.
"""
count = {}
for l in lines:
for c in l.coords:
count[c] = count.get(c, 0) + 1
return count
def isHorizontal(start, end):
"""
Given a line decide if it is horizontal
"""
return start[1] == end[1]
def isVertical(start, end):
"""
Given a line decide if it is vertical
"""
return start[0] == end[0]
def resolveAnchor(anchor):
"""
Given a string anchor name, return a function that transforms BOX2I into
a VECTOR2I
"""
choices = {
"tl": lambda x: x.GetPosition(),
"tr": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth(), 0)),
"bl": lambda x: x.GetPosition() + toKiCADPoint((0, x.GetHeight())),
"br": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth(), x.GetHeight())),
"mt": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth() / 2, 0)),
"mb": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth() / 2, x.GetHeight())),
"ml": lambda x: x.GetPosition() + toKiCADPoint((0, x.GetHeight() / 2)),
"mr": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth(), x.GetHeight() / 2)),
"c": lambda x: x.GetPosition() + toKiCADPoint((x.GetWidth() / 2, x.GetHeight() / 2))
}
return choices[anchor]
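# --- Illustrative sketch (not part of the original KiKit source) -------------
# resolveAnchor() maps an anchor code ("tl", "c", "mr", ...) to a function
# BOX2I -> VECTOR2I. The sketch below resolves the centre of a 10 x 5 mm box
# placed at the origin, using only names already defined or imported here.
def _example_resolve_anchor():
    box = BOX2I(toKiCADPoint((0, 0)), toKiCADPoint((fromMm(10), fromMm(5))))
    center = resolveAnchor("c")(box)
    return center  # expected to lie at (5 mm, 2.5 mm) in internal units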
def splitOn(input: str, predicate: Callable[[str], bool]) \
-> Tuple[str, str]:
"""
    Split a string into a head fulfilling the predicate and the rest
"""
left = ""
for i, x in enumerate(input):
if predicate(x):
left += x
else:
break
return left, input[i:]
def indexOf(list, predicate):
"""
Return the index of the first element that satisfies predicate. If no
element is found, return -1
"""
for i, x in enumerate(list):
if predicate(x):
return i
return -1
def readParameterList(inputStr):
"""
Given a string, read semicolon separated parameter list in the form of
`key: value; key: value`. You can escape via `\\`
"""
from kikit.panelize_ui import splitStr
if len(inputStr.strip()) == 0:
return {}
try:
values = {}
for i, pair in enumerate(splitStr(";", "\\", inputStr)):
if len(pair.strip()) == 0:
continue
s = pair.split(":")
if i == 0 and len(s) == 1:
values["type"] = s[0].strip()
continue
key, value = s[0].strip(), s[1].strip()
values[key] = value
return values
except (TypeError, IndexError):
raise RuntimeError(f"'{pair}' is not a valid key: value pair")
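# --- Illustrative sketch (not part of the original KiKit source) -------------
# readParameterList() turns a string such as "frame; width: 5mm; space: 3mm"
# into a dictionary, treating a leading bare token as the "type" key. The
# expected output in the comment assumes kikit's own splitStr behaves as the
# docstring above describes.
def _example_read_parameter_list():
    params = readParameterList("frame; width: 5mm; space: 3mm")
    # expected: {"type": "frame", "width": "5mm", "space": "3mm"}
    return params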
def fakeKiCADGui():
"""
KiCAD assumes wxApp and locale exists. If we invoke a command, fake the
existence of an app. You should store the application in a top-level
function of the command
"""
import wx
import os
if os.name != "nt" and os.environ.get("DISPLAY", "").strip() == "":
return None
app = wx.App()
app.InitLocale()
return app | PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/@mapbox/node-pre-gyp/lib/util/compile.js | 'use strict';
module.exports = exports;
const fs = require('fs');
const path = require('path');
const win = process.platform === 'win32';
const existsSync = fs.existsSync || path.existsSync;
const cp = require('child_process');
// try to build up the complete path to node-gyp
/* priority:
- node-gyp on ENV:npm_config_node_gyp (https://github.com/npm/npm/pull/4887)
- node-gyp on NODE_PATH
- node-gyp inside npm on NODE_PATH (ignore on iojs)
- node-gyp inside npm beside node exe
*/
function which_node_gyp() {
let node_gyp_bin;
if (process.env.npm_config_node_gyp) {
try {
node_gyp_bin = process.env.npm_config_node_gyp;
if (existsSync(node_gyp_bin)) {
return node_gyp_bin;
}
} catch (err) {
// do nothing
}
}
try {
const node_gyp_main = require.resolve('node-gyp'); // eslint-disable-line node/no-missing-require
node_gyp_bin = path.join(path.dirname(
path.dirname(node_gyp_main)),
'bin/node-gyp.js');
if (existsSync(node_gyp_bin)) {
return node_gyp_bin;
}
} catch (err) {
// do nothing
}
if (process.execPath.indexOf('iojs') === -1) {
try {
const npm_main = require.resolve('npm'); // eslint-disable-line node/no-missing-require
node_gyp_bin = path.join(path.dirname(
path.dirname(npm_main)),
'node_modules/node-gyp/bin/node-gyp.js');
if (existsSync(node_gyp_bin)) {
return node_gyp_bin;
}
} catch (err) {
// do nothing
}
}
const npm_base = path.join(path.dirname(
path.dirname(process.execPath)),
'lib/node_modules/npm/');
node_gyp_bin = path.join(npm_base, 'node_modules/node-gyp/bin/node-gyp.js');
if (existsSync(node_gyp_bin)) {
return node_gyp_bin;
}
}
module.exports.run_gyp = function(args, opts, callback) {
let shell_cmd = '';
const cmd_args = [];
if (opts.runtime && opts.runtime === 'node-webkit') {
shell_cmd = 'nw-gyp';
if (win) shell_cmd += '.cmd';
} else {
const node_gyp_path = which_node_gyp();
if (node_gyp_path) {
shell_cmd = process.execPath;
cmd_args.push(node_gyp_path);
} else {
shell_cmd = 'node-gyp';
if (win) shell_cmd += '.cmd';
}
}
const final_args = cmd_args.concat(args);
const cmd = cp.spawn(shell_cmd, final_args, { cwd: undefined, env: process.env, stdio: [0, 1, 2] });
cmd.on('error', (err) => {
if (err) {
return callback(new Error("Failed to execute '" + shell_cmd + ' ' + final_args.join(' ') + "' (" + err + ')'));
}
callback(null, opts);
});
cmd.on('close', (code) => {
if (code && code !== 0) {
return callback(new Error("Failed to execute '" + shell_cmd + ' ' + final_args.join(' ') + "' (" + code + ')'));
}
callback(null, opts);
});
}; | PypiClean |
/Flask-Sessionstore-0.4.5.tar.gz/Flask-Sessionstore-0.4.5/README.md | Flask-Sessionstore
==================
[](https://travis-ci.org/mcrowson/flask-sessionstore)
[](http://flask-sessionstore.readthedocs.io/en/latest/?badge=latest)
[](https://coveralls.io/github/mcrowson/flask-session)
[](https://www.quantifiedcode.com/app/project/df2c3cad886341899a8e5e2c0fd1a047)
This project is a hard fork of the orphaned Flask-Session project at https://github.com/fengsp/flask-session that aims to provide
python2 and python3 support for a growing number of session backends.
Flask-Sessionstore is an extension for Flask that adds support for server-side sessions to your application.
Please see the [Documentation](http://flask-sessionstore.rtfd.io) for implementation and configuration instructions.
```bash
pip install flask-sessionstore
```
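The package keeps the familiar Flask-Session interface. The snippet below is a minimal sketch, assuming the `Session` class is importable from the `flask_sessionstore` module and using the filesystem backend; see the documentation for the full list of `SESSION_TYPE` backends and their options.
```python
from flask import Flask, session
from flask_sessionstore import Session  # assumed import path for this fork
app = Flask(__name__)
app.config['SESSION_TYPE'] = 'filesystem'  # swap for 'redis', 'mongodb', ...
Session(app)
@app.route('/set/')
def set_value():
    session['key'] = 'value'
    return 'ok'
@app.route('/get/')
def get_value():
    return session.get('key', 'not set')
```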
## Testing
Tests require running instances of MongoDB, Redis, and Memcached. The easiest way to get those
is via docker-compose.
```bash
$ docker-compose up -d
$ nosetests --with-timer
$ docker-compose down
```
| PypiClean |
/CSUMMDET-1.0.23.tar.gz/CSUMMDET-1.0.23/mmdet/models/anchor_heads/fovea_head.py | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import multi_apply, multiclass_nms
from mmdet.ops import DeformConv
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
INF = 1e8
class FeatureAlign(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAlign, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(
4, deformable_groups * offset_channels, 1, bias=False)
self.conv_adaption = DeformConv(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
def forward(self, x, shape):
offset = self.conv_offset(shape)
x = self.relu(self.conv_adaption(x, offset))
return x
@HEADS.register_module
class FoveaHead(nn.Module):
"""FoveaBox: Beyond Anchor-based Object Detector
https://arxiv.org/abs/1904.03797
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
base_edge_list=(16, 32, 64, 128, 256),
scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128,
512)),
sigma=0.4,
with_deform=False,
deformable_groups=4,
loss_cls=None,
loss_bbox=None,
conv_cfg=None,
norm_cfg=None):
super(FoveaHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.base_edge_list = base_edge_list
self.scale_ranges = scale_ranges
self.sigma = sigma
self.with_deform = with_deform
self.deformable_groups = deformable_groups
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
# box branch
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fovea_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
# cls branch
if not self.with_deform:
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fovea_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
else:
self.cls_convs.append(
ConvModule(
self.feat_channels, (self.feat_channels * 4),
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.cls_convs.append(
ConvModule((self.feat_channels * 4), (self.feat_channels * 4),
1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.feature_adaption = FeatureAlign(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deformable_groups=self.deformable_groups)
self.fovea_cls = nn.Conv2d(
int(self.feat_channels * 4),
self.cls_out_channels,
3,
padding=1)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fovea_cls, std=0.01, bias=bias_cls)
normal_init(self.fovea_reg, std=0.01)
if self.with_deform:
self.feature_adaption.init_weights()
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def forward_single(self, x):
cls_feat = x
reg_feat = x
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
bbox_pred = self.fovea_reg(reg_feat)
if self.with_deform:
cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fovea_cls(cls_feat)
return cls_score, bbox_pred
def get_points(self, featmap_sizes, dtype, device, flatten=False):
points = []
for featmap_size in featmap_sizes:
x_range = torch.arange(
featmap_size[1], dtype=dtype, device=device) + 0.5
y_range = torch.arange(
featmap_size[0], dtype=dtype, device=device) + 0.5
y, x = torch.meshgrid(y_range, x_range)
if flatten:
points.append((y.flatten(), x.flatten()))
else:
points.append((y, x))
return points
def loss(self,
cls_scores,
bbox_preds,
gt_bbox_list,
gt_label_list,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
num_imgs = cls_scores[0].size(0)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_labels, flatten_bbox_targets = self.fovea_target(
gt_bbox_list, gt_label_list, featmap_sizes, points)
pos_inds = (flatten_labels > 0).nonzero().view(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
if num_pos > 0:
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_weights = pos_bbox_targets.new_zeros(
pos_bbox_targets.size()) + 1.0
loss_bbox = self.loss_bbox(
pos_bbox_preds,
pos_bbox_targets,
pos_weights,
avg_factor=num_pos)
else:
loss_bbox = torch.tensor([0],
dtype=flatten_bbox_preds.dtype,
device=flatten_bbox_preds.device)
return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)
def fovea_target(self, gt_bbox_list, gt_label_list, featmap_sizes, points):
label_list, bbox_target_list = multi_apply(
self.fovea_target_single,
gt_bbox_list,
gt_label_list,
featmap_size_list=featmap_sizes,
point_list=points)
flatten_labels = [
torch.cat([
labels_level_img.flatten() for labels_level_img in labels_level
]) for labels_level in zip(*label_list)
]
flatten_bbox_targets = [
torch.cat([
bbox_targets_level_img.reshape(-1, 4)
for bbox_targets_level_img in bbox_targets_level
]) for bbox_targets_level in zip(*bbox_target_list)
]
flatten_labels = torch.cat(flatten_labels)
flatten_bbox_targets = torch.cat(flatten_bbox_targets)
return flatten_labels, flatten_bbox_targets
def fovea_target_single(self,
gt_bboxes_raw,
gt_labels_raw,
featmap_size_list=None,
point_list=None):
gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
(gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
label_list = []
bbox_target_list = []
# for each pyramid, find the cls and box target
for base_len, (lower_bound, upper_bound), stride, featmap_size, \
(y, x) in zip(self.base_edge_list, self.scale_ranges,
self.strides, featmap_size_list, point_list):
labels = gt_labels_raw.new_zeros(featmap_size)
bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1],
4) + 1
# scale assignment
hit_indices = ((gt_areas >= lower_bound) &
(gt_areas <= upper_bound)).nonzero().flatten()
if len(hit_indices) == 0:
label_list.append(labels)
bbox_target_list.append(torch.log(bbox_targets))
continue
_, hit_index_order = torch.sort(-gt_areas[hit_indices])
hit_indices = hit_indices[hit_index_order]
gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride
gt_labels = gt_labels_raw[hit_indices]
half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])
half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])
# valid fovea area: left, right, top, down
pos_left = torch.ceil(
gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\
clamp(0, featmap_size[1] - 1)
pos_right = torch.floor(
gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\
clamp(0, featmap_size[1] - 1)
pos_top = torch.ceil(
gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\
clamp(0, featmap_size[0] - 1)
pos_down = torch.floor(
gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\
clamp(0, featmap_size[0] - 1)
for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \
zip(pos_left, pos_top, pos_right, pos_down, gt_labels,
gt_bboxes_raw[hit_indices, :]):
labels[py1:py2 + 1, px1:px2 + 1] = label
bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \
(stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \
(stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \
(gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len
bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \
(gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len
bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)
label_list.append(labels)
bbox_target_list.append(torch.log(bbox_targets))
return label_list, bbox_target_list
def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
points = self.get_points(
featmap_sizes,
bbox_preds[0].dtype,
bbox_preds[0].device,
flatten=True)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
featmap_sizes, points,
img_shape, scale_factor, cfg,
rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
featmap_sizes,
point_list,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(point_list)
det_bboxes = []
det_scores = []
for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) \
in zip(cls_scores, bbox_preds, featmap_sizes, self.strides,
self.base_edge_list, point_list):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp()
nms_pre = cfg.get('nms_pre', -1)
if (nms_pre > 0) and (scores.shape[0] > nms_pre):
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
y = y[topk_inds]
x = x[topk_inds]
x1 = (stride * x - base_len * bbox_pred[:, 0]).\
clamp(min=0, max=img_shape[1] - 1)
y1 = (stride * y - base_len * bbox_pred[:, 1]).\
clamp(min=0, max=img_shape[0] - 1)
x2 = (stride * x + base_len * bbox_pred[:, 2]).\
clamp(min=0, max=img_shape[1] - 1)
y2 = (stride * y + base_len * bbox_pred[:, 3]).\
clamp(min=0, max=img_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], -1)
det_bboxes.append(bboxes)
det_scores.append(scores)
det_bboxes = torch.cat(det_bboxes)
if rescale:
det_bboxes /= det_bboxes.new_tensor(scale_factor)
det_scores = torch.cat(det_scores)
padding = det_scores.new_zeros(det_scores.shape[0], 1)
det_scores = torch.cat([padding, det_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(det_bboxes, det_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels | PypiClean |
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/ICGS_NMF.py |
#Copyright 2017 Cincinnati Children's Hospital Medical Center, Research Foundation
#Author Meenakshi Venkatasubramanian - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
""" ICGS-NMF Module (Combatible with ICGS2 and splice-ICGS)
https://github.com/venkatmi/oncosplice
Steps applied in this workflow:
1 - Run splice-ICGS (Feature Selection)
2 - Block identification (Rank analysis)
3 - NMF Analysis (Initial subtype identification)
4 - Filter Event Annotation
5 - Meta data analysis (differential expression)
6 - Expand clusters (SVM sample classification)
7 - Mutation enrichment (MAF or VCF - optional)
8 - Correlation depletion (excluded biological confounding signatures)
"""
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import traceback
import sys, string, os
import RNASeq
import numpy as np
from stats_scripts import RNASeq_blockIdentification
from stats_scripts import NMF_Analysis; reload(NMF_Analysis)
from stats_scripts import filterEventAnnotation
from stats_scripts import metaDataAnalysis
from stats_scripts import ExpandSampleClusters; reload(ExpandSampleClusters)
from import_scripts import sampleIndexSelection
from stats_scripts import Correlationdepletion
import UI
import multiprocessing as mlp
import export
upd_guides=[]
import operator
from collections import OrderedDict
from collections import defaultdict
from stats_scripts import Kmeans
from stats_scripts import MutationEnrichment_adj as ME
from visualization_scripts import Orderedheatmap
from visualization_scripts import clustering; reload(clustering)
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import KDTree
import community
import collections
from scipy.stats import variation
import networkx as nx
from sklearn.preprocessing import scale
from numpy import linalg as LA
import scipy
import warnings
warnings.filterwarnings('ignore')
def estimateK(inputfile):
header=[]
X=[]
head=0
counter=0
hgv={}
hgvgenes=[]
diclst={}
for line in open(inputfile,'rU').xreadlines():
if head==0:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
header=q
head=1
continue
else:
val=[]
line=line.rstrip('\r\n')
q= string.split(line,'\t')
#header.append(q[0])
for i in range(1,len(q)):
try:
val.append(float(q[i]))
except Exception:
continue
counter+=1
# break
X.append(val)
#X=zip(*X)
X=np.array(X)
try:
n=float(X.shape[0])
p=float(X.shape[1])
    except: ### dimension error - fall back to k=15
return 15
X=scale(X)
Xt=np.transpose(X)
muTW=float((np.sqrt(n-1))+float(np.sqrt(p)))**2.0
sigmaTW=(float(np.sqrt(n - 1.0)) + float(np.sqrt(p))) * (1.0/float(np.sqrt(n - 1)) + 1.0/float(np.sqrt(p)))**(1.0/3.0)
sigmaHat=np.dot(Xt,X)
bd = 3.273 * sigmaTW + muTW
w,v = LA.eig(sigmaHat)
w=w.tolist()
k=0
for i in range(len(w)):
try:
if w[i]>bd:
k=k+1
except Exception:
if w[i].real>bd:
k=k+1
return k
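# --- Illustrative sketch (not part of the original AltAnalyze source) --------
# estimateK() scales the expression matrix, builds X^T.X and counts the
# eigenvalues that exceed a Tracy-Widom style bound (3.273 * sigmaTW + muTW);
# that count becomes the initial guess for the number of clusters k. The
# sketch below writes a small random matrix in the layout estimateK expects
# (one header row, one ID column, gene rows) to a temporary tab-delimited file
# and runs the estimator on it. File name and sizes are purely illustrative.
def _example_estimate_k(num_genes=200, num_cells=60):
    import tempfile
    rng = np.random.RandomState(0)
    handle = tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False)
    handle.write('UID\t' + '\t'.join('cell%d' % i for i in range(num_cells)) + '\n')
    for g in range(num_genes):
        row = rng.rand(num_cells)
        handle.write('gene%d\t' % g + '\t'.join(str(v) for v in row) + '\n')
    handle.close()
    return estimateK(handle.name)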
def caldist(X,i,keys,keylist):
D=[]
Xxd=[]
newlist=[]
#for i in range(len(visited)):
#Xd=np.array(X[i])
#Xd=Xd.reshape(1, -1)
for ii in keys:
if ii==i: continue
newlist.append(ii)
Xxd.append(X[ii].tolist())
Xxd=np.array(Xxd)
Xd=X[i]
#Xd=Xxd
#Xxd=Xxd.tolist()
Xd=Xd.reshape(1, -1)
D=pairwise_distances(Xd,Xxd,metric='euclidean').tolist()
for q in range(len(np.argsort(D)[0])):
if newlist[q] in keylist:
continue
else:
key1=newlist[q]
break
return key1
def hgvfinder(inputfile,numVarGenes=500):
""" Find the highly variable genes by dispersion """
print 'Number of variable genes for dispersion:',numVarGenes
header=[]
X=[]
head=0
counter=0
hgv={}
hgvgenes=[]
for line in open(inputfile,'rU').xreadlines():
if head==0:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
count=len(q)-1
"""
if count >20000:
community=True
else:
community=False
"""
header=q
head=1
continue
else:
val=[]
line=line.rstrip('\r\n')
q= string.split(line,'\t')
#header.append(q[0])
for i in range(1,len(q)):
try:
val.append(float(q[i]))
except Exception:
continue
coun=len(set(val))
qq=q[0].lower()
if (qq.startswith("rpl") or qq.startswith("rps") or qq.startswith("mt-") or qq.startswith("ig")) and community:
continue
else:
if coun >5:
disp=float(np.var(val))/float(np.mean(val))
#me=float(np.mean(val))
hgv[q[0]]=disp
counter+=1
#if counter%500==0: print counter,
# break
#with open('hgv_0.1.txt', 'w') as f:
# for item in hgv:
# f.write(str(item)+"\t"+str(hgv[item]))
# f.write("\n")
#
hgv= sorted(hgv.items(), key=operator.itemgetter(1),reverse=True)
counter=0
for item,item2 in hgv:
if counter<numVarGenes: ### Number of highly variable genes for dispersion
hgvgenes.append(item)
counter+=1
output_file=inputfile[:-4]+'-filtered.txt'
#copy sample index selection file-mv
sampleIndexSelection.filterRows(inputfile,output_file,hgvgenes)
return output_file,count
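# --- Illustrative sketch (not part of the original AltAnalyze source) --------
# hgvfinder() ranks genes by dispersion (variance divided by mean expression)
# and keeps the top numVarGenes. Stripped of the file handling above, the core
# ranking step amounts to the function below; expression_by_gene is an assumed
# dict of gene name -> list of expression values.
def _example_rank_by_dispersion(expression_by_gene, num_var_genes=500):
    dispersion = {}
    for gene, values in expression_by_gene.items():
        if len(set(values)) > 5:  # same detectability filter used above
            dispersion[gene] = float(np.var(values)) / float(np.mean(values))
    ranked = sorted(dispersion.items(), key=operator.itemgetter(1), reverse=True)
    return [gene for gene, _ in ranked[:num_var_genes]]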
def community_sampling(inputfile,downsample_cutoff):
""" This function performs downsampling of the input data using networkx to identify
initial distribution of cells, then Louvain clustering using the minimum resolution to
identify discrete initial clusters. """
header=[]
X=[]
head=0
counter=0
hgv={}
hgvgenes=[]
for line in open(inputfile,'rU').xreadlines():
if head==0:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
header=q
head=1
continue
else:
val=[]
line=line.rstrip('\r\n')
q= string.split(line,'\t')
#header.append(q[0])
for i in range(1,len(q)):
try:
val.append(float(q[i]))
except Exception:
continue
counter+=1
# break
X.append(val)
X=zip(*X)
X=np.array(X)
n=X.shape[0]
sampmark=[]
nn=X.shape[0]
nm=X.shape[1]
from annoy import AnnoyIndex
t=AnnoyIndex(nm,metric="euclidean")
for i in range(nn):
try: t.add_item(i,X[i])
except Exception: print i
t.build(100)
### t.save('ICGS.ann')
### u=AnnoyIndex(nm,metric="euclidean")
diclst={}
#### u.load('ICGS.ann')
#n1=25
print "creating graphs"
for i in range(nn):
#ind = tree.query([Xtemp[i]],k=10,return_distance=False,dualtree=True)
ind=t.get_nns_by_item(i,10)
diclst[i]=ind
G=nx.from_dict_of_lists(diclst)
# nx.write_adjlist(G,"test.adjlist")
#G=nx.read_adjlist("test.adjlist")
dendrogram= community.generate_dendrogram(G)
#for level in range(len(dendrogram) - 1):
level=0
pr= community.partition_at_level(dendrogram,level)
commun={}
comval={}
for key1 in pr:
try: commun[pr[key1]].append(key1)
except Exception: commun[pr[key1]]=[key1,]
try: comval[pr[key1]].append(X[int(key1)])
except Exception: comval[pr[key1]]=[X[int(key1)],]
print "Finding medians"
comindices=[]
downsamp_lim=downsample_cutoff*4
for key1 in comval:
k=downsamp_lim/len(comval)
if k<1: k=1
k2=len(comval[key1])
matri=np.array(comval[key1])
matri=np.array(matri)
#n=matri.shape[0]
D=pairwise_distances(matri,metric='euclidean').tolist()
D=np.array(D)
dist=np.mean(D,0)
if k2<k:
k=k2
count=0
for i in np.argsort(dist):
if count<k:
comindices.append(commun[key1][i])
count=count+1
sampmark=[]
for key1 in comindices:
#if count<2500:
#print key1
key=int(key1)
sampmark.append(header[key+1])
return sampmark
def PageRankSampling(inputfile,downsample_cutoff):
""" Google PageRank algorithm from networkX for graph-based link analysis """
header=[]
X=[]
head=0
counter=0
hgv={}
hgvgenes=[]
for line in open(inputfile,'rU').xreadlines():
if head==0:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
header=q
head=1
continue
else:
val=[]
line=line.rstrip('\r\n')
q= string.split(line,'\t')
#header.append(q[0])
for i in range(1,len(q)):
try:
val.append(float(q[i]))
except Exception:
continue
counter+=1
X.append(val)
X=zip(*X)
X=np.array(X)
n=X.shape[0]
sampmark1=[]
downsamp_lim=downsample_cutoff*4
for iq in range(0,n,downsamp_lim):
jj=downsample_cutoff
if iq+downsamp_lim>n:
j=n-iq
else:
j=downsamp_lim
jj=int(float(j+1)/4.0)
jj=downsample_cutoff
#if jj<downsample_cutoff and n<3000:
#jj=n
Xtemp=X[iq:iq+j,]
nn=Xtemp.shape[0]
nm=Xtemp.shape[1]
diclst={}
from annoy import AnnoyIndex
t=AnnoyIndex(nm)
for i in range(nn):
t.add_item(i,Xtemp[i])
t.build(100)
t.save('ICGS.ann')
u=AnnoyIndex(nm)
u.load('ICGS.ann')
#tree = KDTree(X, leaf_size=10, metric='euclidean')
#n1=25
for i in range(nn):
#ind = tree.query([Xtemp[i]],k=10,return_distance=False,dualtree=True)
ind=u.get_nns_by_item(i,10)
diclst[i]=ind
# diclst[i]=ind.tolist()[0]
print "creating graphs"
G=nx.from_dict_of_lists(diclst)
#nx.write_adjlist(G,"test.adjlist")
#G=nx.read_adjlist("test.adjlist")
print "computing page rank"
pr=nx.pagerank(G)
pr= sorted(pr.items(), key=operator.itemgetter(1),reverse=True)
count=0
pr1=OrderedDict()
for (key1,key2) in pr:
if count<jj:
#print key1
key1=iq+int(key1)
pr1[key1,key2]=[]
#print header[key1-1]
sampmark1.append(key1)
count+=1
#with open('pangranresults_0.1.txt', 'w') as f:
#for (key1,key2) in pr:
#f.write(str(key1)+"\t"+str(key2)+"\n")
#f.write("\n")
samp=[]
sampdict={}
sampmark=[]
for (key1,key2) in pr1:
if len(samp)<len(pr1):
if key1 not in samp:
sampdict[key1]=[]
neighbours=list(G.adj[key1])
samp.append(key1)
for ii in range(len(neighbours)):
if neighbours[ii] not in samp and neighbours[ii] in sampmark1:
sampdict[key1].append(neighbours[ii])
samp.append(neighbours[ii])
else:
dup=[]
for key in sampdict:
if key1 in sampdict[key]:
neighbours=list(G.adj[key1])
for ii in range(len(neighbours)):
if neighbours[ii] not in samp and neighbours[ii] in sampmark1:
sampdict[key].append(neighbours[ii])
samp.append(neighbours[ii])
key=pr[0][0]
keylist=[]
keylist.append(key)
while len(keylist) <len(sampdict):
key=caldist(X,key,sampdict,keylist)
keylist.append(key)
for keys in range(len(keylist)):
sampmark.append(header[keylist[keys]+1])
for i in range(len(sampdict[keylist[keys]])):
sampmark.append(header[sampdict[keylist[keys]][i]+1])
#with open('pangranresults_0.1.txt', 'w') as f:
#for item in range(len(sampmark)):
#f.write(str(sampmark[item])+"\n")
#f.write("\n")
samptemp=[]
for i in range(len(header)):
if header[i] in sampmark:
samptemp.append(header[i])
sampmark=samptemp
if len(sampmark)>downsample_cutoff:
output_file=inputfile[:-4]+'-filtered.txt'
sampleIndexSelection.filterFile(inputfile,output_file,sampmark)
sampmark=PageRankSampling(output_file,downsample_cutoff)
return sampmark
else:
return sampmark
def filterPSIValues(filename):
fn = filepath(filename)
firstRow=True
header = True
rows=0
filtered=0
new_file = filename[:-4]+'-75p.txt'
ea = export.ExportFile(new_file)
for line in open(fn,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if header:
header = False
eventindex=t.index('EventAnnotation')
t = [t[1]]+t[eventindex+1:]
header_length = len(t)-1
minimum_values_present = int(header_length)-1
not_detected = header_length-minimum_values_present
new_line = string.join(t,'\t')+'\n'
ea.write(new_line)
else:
t = [t[1]]+t[eventindex+1:]
missing_values_at_the_end = (header_length+1)-len(t)
missing = missing_values_at_the_end+t.count('')
if missing<not_detected:
new_line = string.join(t,'\t')+'\n'
ea.write(new_line)
filtered+=1
rows+=1
ea.close()
    return new_file
def header_list(EventAnnot):
head=0
header=[]
with open(EventAnnot, 'rU') as fin:
for line in fin:
if head==0:
line = line.rstrip(os.linesep)
line=string.split(line,'\t')
startpos=line.index('EventAnnotation')
header.append('UID')
for i in range(startpos+1,len(line)):
header.append(line[i])
head=1
else:break
return header
def grpDict(grplst):
head=0
header={}
with open(grplst, 'rU') as fin:
for line in fin:
line = line.rstrip(os.linesep)
line=string.split(line,'\t')
#for i in range(len(line)):
try:header[line[2]].append(line[0])
except Exception: header[line[2]]=[line[0],]
return header
def FindTopUniqueEvents(Guidefile,psi,Guidedir):
head=0
guidekeys=[]
exportnam=os.path.join(Guidedir,"SplicingeventCount1.txt")
export_class=open(exportnam,"a")
tempkeys={}
global upd_guides
global train
omitcluster=0
unique_clusters={}
for line in open(Guidefile,'rU').xreadlines():
if head==0:
line1=line.rstrip('\r\n')
q= string.split(line1,'\t')
head=1
try:
uid=q.index('UID')
adjp=q.index('rawp')
dpsi=q.index('dPSI')
Clusterid=q.index('UpdatedClusterID')
cutoff=0.1
continue
except Exception:
uid=q.index('Symbol')
adjp=q.index('rawp')
dpsi=q.index('LogFold')
Clusterid=q.index('Symbol')
cutoff=0.58
else:
line1=line.rstrip('\r\n')
q= string.split(line1,'\t')
if abs(float(q[dpsi]))>cutoff and float(q[adjp])<0.01:
try:
tempkeys[q[Clusterid]].append([q[uid],float(q[adjp]),q[adjp+1]])
except KeyError:
tempkeys[q[Clusterid]]=[[q[uid],float(q[adjp]),q[adjp+1]],]
for i in tempkeys:
if len(tempkeys[i])>1:
tempkeys[i].sort(key=operator.itemgetter(1),reverse=False)
try:
unique_clusters[0].append(tempkeys[i][0])
except KeyError:
unique_clusters[0]=[tempkeys[i][0],]
else:
try:
unique_clusters[0].append(tempkeys[i][0])
except KeyError:
unique_clusters[0]=[tempkeys[i][0],]
try:
if len(unique_clusters[0])>1:
unique_clusters[0].sort(key=operator.itemgetter(1))
if len(unique_clusters[0])>10:
guidekeys=unique_clusters[0][0:150]
for i in range(0,len(guidekeys)):
upd_guides.append(guidekeys[i][0])
else:
omitcluster=1
else:
omitcluster=1
export_class.write(psi+"\t"+str(len(unique_clusters[0]))+"\n")
except Exception:
omitcluster=1
return omitcluster
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def MergeResults(dire):
file_index={}
count=0
for filename in os.listdir(dire):
if ("Results_max" in filename or "Kmeans" in filename) and "._" not in filename and "ordered" not in filename and "max_t" not in filename:
file_index[filename]=count
count+=1
keylist={}
heads={}
for filename in os.listdir(dire):
if "Results_max" in filename or "Kmeans" in filename:
if "._" not in filename and "ordered" not in filename and "max_t" not in filename:
Guidefile=os.path.join(dire, filename)
head=0
for line in open(Guidefile,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
header=[]
if head==0:
head=1
for i in range(1,len(t)):
header.append(t[i])
heads[filename]=header
continue
else:
val=[]
key=t[0]
for i in range(1,len(t)):
val.append(t[i])
if key not in keylist:
keylist[key]=[[file_index[filename],val],]
else:
keylist[key].append([file_index[filename],val])
exportnam=os.path.join(dire,"MergedResult.txt")
export_class=open(exportnam,"w")
export_class.write("uid")
for filename in file_index:
export_class.write("\t")
export_class.write(string.join(heads[filename],"\t"))
export_class.write("\n")
for key in keylist:
export_class.write(key)
for filename in file_index:
for val1,val2 in keylist[key]:
if file_index[filename]==val1:
export_class.write("\t")
export_class.write(string.join(val2,"\t"))
break
export_class.write("\n")
return exportnam
def DetermineClusterFitness(allgenesfile,markerfile,filterfile,BinarizedOutput,rho_cutoff):
""" Determines whether a cluster has mutiple unique genes and hence should be used for SVM (AKA cluster fitness) """
header=True
genes=[]
nametemp=[]
for line in open(BinarizedOutput,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if header:
header = False
else:
val=[]
for i in range(1,len(t)):
val.append(float(t[i]))
if sum(val)>2:
nametemp.append(t[0])
header=False
genes=[]
for line in open(filterfile,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if header:
header = False
else:
genes.append(t[0])
allgenes={}
header=True
name=[]
for line in open(allgenesfile,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
uid = t[0] ### source ID not converted symbol
rho = t[2]
cluster = t[4]
if header:
header = False
else:
if float(rho)>0.3:
allgenes[uid]=cluster
header=True
markerdict={}
counter=1
group=[]
name=[]
common_geneIDs=0
marker_count=0
for line in open(markerfile,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
uid = t[0]
rho = t[2]
cluster = t[4]
marker_count+=1
if header:
header = False
else:
if uid in genes:
common_geneIDs+=1
#if rho_cutoff>0.4:rho_cutoff=0.4
rho_cutoff=0.3
#print rho_cutoff
#rho_cutoff=0.2
if float(rho)>rho_cutoff and cluster == allgenes[uid]:
try: markerdict[cluster].append([uid,float(rho)])
except Exception: markerdict[cluster]=[[uid,float(rho)]]
if (common_geneIDs+2)<marker_count:
print 'WARNING... only',common_geneIDs, 'out of', marker_count, 'gene IDs matched after conversion.'
for key in markerdict:
countr=1
if len(markerdict[key])>=2 and key in nametemp:
name.append(key+"_vs_Others.txt")
group.append(counter)
for i,j in markerdict[key] :
#if countr<30:
upd_guides.append(i)
countr+=1
counter+=1
return upd_guides,name,group
def sortFile(allgenesfile,rho_cutoff,name):
markergenes={}
val=[]
header=True
namelst=[]
for i in range(len(name)):
s=string.split(name[i],"_")[0]
namelst.append(s)
for line in open(allgenesfile,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if header:
header = False
else:
values=[]
for i in range(len(t)):
if i ==0:
values.append(t[i])
if i ==2:
values.append(float(t[2]))
if i==4:
if "V" in t[4] and t[4] in namelst:
t[4]=string.replace(t[4],"V","")
values.append(t[4])
else:
values.append(t[4])
val.append(values)
val = sorted(val, key = operator.itemgetter(1),reverse=True)
val = sorted(val, key = operator.itemgetter(2))
count=0
prev="NA"
markerlst=[]
markergrps={}
for i in range(len(val)):
if val[i][2]==prev:
if count<60 and val[i][1]>=0.1: #rho_cutoff
try:markergrps[val[i][2]].append(val[i][0])
except Exception:markergrps[val[i][2]]=[val[i][0],]
markerlst.append(val[i][0])
count=count+1
prev=val[i][2]
else:
prev=val[i][2]
continue
else:
count=0
if val[i][1]>=0.1:
try:markergrps[val[i][2]].append(val[i][0])
except Exception:markergrps[val[i][2]]=[val[i][0],]
markerlst.append(val[i][0])
count=count+1
prev=val[i][2]
return markergrps,markerlst
def generateMarkerheatmap(processedInputExpFile,output_file,NMFSVM_centroid_cluster_dir,groupsdict,markergrps,header1,outputDir,root_dir,species,uniqueIDs):
""" Produces a final MarkerFinder result from ICGS-NMF """
matrix={}
header=True
samples=[]
samples2=[]
samples3=[]
samples_all=[]
samples2_all=[]
groups_list=[]
groups_list_all=[]
genes=[]
genes2=[]
exportnam2=root_dir+'/ICGS-NMF/FinalGroups.txt'
export_class2=open(exportnam2,"w")
for line in open(NMFSVM_centroid_cluster_dir,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
sampleOrder=[]
if header:
for i in range(len(t)):
if ":" in t[i]:
val=string.split(t[i],":")[1]
gr=string.split(val,"-")[1]
gr=string.split(gr,"_")[0]
gr=gr.replace("V","")
#sampleOrder.append(string.split(val,"-")[1])
sampleOrder.append(gr)
break
header=True
samp=[]
for line in open(processedInputExpFile,'rU').xreadlines():
data = line.rstrip()
t = string.split(data,'\t')
if header:
for i in range(1,len(t)):
samp.append(t[i])
header=False
continue
else:
for i in range(1,len(t)):
matrix[t[0],samp[i-1]]=t[i]
for i in range(len(sampleOrder)):
for j in range(len(groupsdict[sampleOrder[i]])):
export_class2.write(groupsdict[sampleOrder[i]][j]+"\t"+str(i+1)+"\t"+sampleOrder[i]+"\n")
if groupsdict[sampleOrder[i]][j] in header1:
samples.append(groupsdict[sampleOrder[i]][j])
groups_list.append(sampleOrder[i])
samples2.append(groupsdict[sampleOrder[i]][j])
samples3.append(sampleOrder[i]+':'+groupsdict[sampleOrder[i]][j])
for i in range(len(sampleOrder)):
for j in range(len(markergrps[sampleOrder[i]])):
uid = markergrps[sampleOrder[i]][j]
genes.append(uid)
if uid in uniqueIDs:
symbol = uniqueIDs[uid]
else:
symbol = uid
genes2.append((sampleOrder[i],uid))
MF_subsampled_export = outputDir+'/'+'MarkerFinder-subsampled-ordered.txt'
exportnam=open(MF_subsampled_export,"w")
exportnam.write(string.join(['UID','row_clusters-flat']+samples3,'\t')+'\n')
exportnam.write(string.join(['column_clusters-flat','']+groups_list,'\t')+'\n')
i=0
for i in range(len(genes)):
exportnam.write(genes2[i][1]+"\t"+genes2[i][0])
for j in range(len(samples)):
exportnam.write("\t"+matrix[genes[i],samples2[j]])
exportnam.write("\n")
exportnam.close()
export_class2.close()
graphic_links=[]
row_method=None
column_method=None
column_metric='euclidean'
row_metric='correlation'
color_gradient = 'yellow_black_blue'
transpose=False
import UI
Species=species
platform="RNASeq"
Vendor=""
gsp = UI.GeneSelectionParameters(Species,platform,Vendor)
gsp.setPathwaySelect('None Selected')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setGeneSet('None Selected')
gsp.setJustShowTheseIDs('')
gsp.setTranspose(False)
gsp.setNormalize('median')
gsp.setGeneSelection('')
#gsp.setClusterGOElite('GeneOntology')
gsp.setClusterGOElite('BioMarkers')
graphic_links = clustering.runHCexplicit(MF_subsampled_export,graphic_links, row_method, row_metric, column_method,column_metric,color_gradient, gsp, display=False, Normalize=True,contrast=5)
graphic_links[-1][0] = MF_subsampled_export
if len(samp)>len(header1):
MF_all_export = outputDir+'/'+'MarkerFinder-Allsamples-ordered.txt'
all_cells_export=open(MF_all_export,"w")
for i in range(len(sampleOrder)):
for j in range(len(groupsdict[sampleOrder[i]])):
samples_all.append(sampleOrder[i]+":"+groupsdict[sampleOrder[i]][j])
groups_list_all.append(sampleOrder[i])
samples2_all.append(groupsdict[sampleOrder[i]][j])
all_cells_export.write(string.join(['UID','row_clusters-flat']+samples_all,'\t')+'\n')
all_cells_export.write(string.join(['column_clusters-flat','']+groups_list_all,'\t')+'\n')
for i in range(len(genes)):
all_cells_export.write(genes2[i][1]+"\t"+genes2[i][0])
for j in range(len(samples_all)):
all_cells_export.write("\t"+matrix[genes[i],samples2_all[j]])
all_cells_export.write("\n")
all_cells_export.close()
graphic_links = clustering.runHCexplicit(MF_all_export,graphic_links, row_method, row_metric, column_method,column_metric,color_gradient, gsp, display=False, Normalize=True,contrast=5)
graphic_links[-1][0] = MF_all_export
status = 'subsampled'
else:
status = 'not-subsampled'
return status, graphic_links
def callICGS(processedInputExpFile,species,rho_cutoff,dynamicCorrelation,platform,gsp):
#Run ICGS recursively to dynamically identify the best rho cutoff
graphic_links3,n = RNASeq.singleCellRNASeqWorkflow(species,platform,processedInputExpFile,mlp,dynamicCorrelation, rpkm_threshold=0, parameters=gsp)
if n>5000 and dynamicCorrelation:
rho_cutoff=rho_cutoff+0.1
gsp.setRhoCutoff(rho_cutoff)
print 'Increasing the Pearson rho threshold to:',rho_cutoff
graphic_links3,n,rho_cutoff=callICGS(processedInputExpFile,species,rho_cutoff,dynamicCorrelation,platform,gsp)
return graphic_links3,n,rho_cutoff
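### Illustrative usage sketch (comments only, not executed): callICGS re-runs ICGS,
### raising the rho cutoff by 0.1 whenever the returned feature count n exceeds 5,000 and
### dynamicCorrelation is enabled. The gsp object is assumed to be a configured
### UI.GeneSelectionParameters instance (see the __main__ block at the end of this file).
#graphic_links, n_features, final_rho = callICGS(processedInputExpFile, 'Hs', 0.2, True, 'RNASeq', gsp)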
def getAllSourceIDs(filename,species):
unique_ids={}
symbol_ids={}
IDtype='Symbol'
count=0
typeCount = 0
for line in open(filename,'rU').xreadlines():
data = cleanUpLine(line)
uid = string.split(data,'\t')[0]
unique_ids[uid]=''
if count<100:
if 'ENS' in uid:
typeCount+=1
IDtype='Ensembl'
else:
try:
int(uid)
typeCount+=1
IDtype='EntrezGene'
except Exception:
pass
count+=1
### Check to see if these IDs are Ensembl IDs or EntrezGene
if typeCount>50: ### If over half of the IDs are EntrezGene or Ensembl
count=0
try:
import gene_associations
gene_annotations = gene_associations.importGeneData(species,IDtype)
except:
gene_annotations={}
for uid in gene_annotations:
if uid in unique_ids:
unique_ids[uid]=gene_annotations[uid].Symbol() #### Convert to Symbol
if 'LRG_' not in uid:
symbol_ids[gene_annotations[uid].Symbol()]=uid
count+=1
print count, IDtype, 'IDs with corresponding gene symbols out of', len(unique_ids)
return unique_ids, symbol_ids
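### Illustrative usage sketch (comments only; the file path is a hypothetical placeholder):
### the first column of the expression file is scanned (first 100 rows) to guess whether the
### IDs are Ensembl ('ENS' prefix), EntrezGene (integer) or already gene symbols, and
### Ensembl/EntrezGene IDs are mapped to symbols when annotations can be imported.
#uniqueIDs, symbolIDs = getAllSourceIDs('ExpressionInput/exp.MyDataset.txt', 'Hs')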
def CompleteICGSWorkflow(root_dir,processedInputExpFile,EventAnnot,iteration,rho_cutoff,dynamicCorrelation,platform,species,scaling,gsp):
""" Run the entire ICGS-NMF workflow, recursively """
originalExpFile = EventAnnot
### Store a list of all valid original IDs (for ID conversions)
uniqueIDs, symbolIDs = getAllSourceIDs(processedInputExpFile,species)
if platform=='PSI':
### For splice-ICGS, the method performs signature depletion (removes correlated events from the prior round) on the Event Annotation file
FilteredEventAnnot=filterEventAnnotation.FilterFile(processedInputExpFile,EventAnnot,iteration)
graphic_links3 = RNASeq.singleCellRNASeqWorkflow(species, 'exons', processedInputExpFile,mlp, rpkm_threshold=0, parameters=gsp)
else:
### For single-cell RNA-Seq - run ICGS recursively to dynamically identify the best rho cutoff
graphic_links3,n,rho_cutoff=callICGS(processedInputExpFile,species,rho_cutoff,dynamicCorrelation,platform,gsp)
Guidefile=graphic_links3[-1][-1]
Guidefile=Guidefile[:-4]+'.txt'
#Guidefile="/Volumes/Pass/ICGS2_testrun/ExpressionInput/amplify/DataPlots/Clustering-exp.input-Guide3 AREG GZMA BTG1 CCL5 TMSB4X ITGA2B UBE2C IRF-hierarchical_euclidean_correlation.txt"
#rho_cutoff=0.2
try:
print "Running block identification for rank analyses - Round"+str(iteration)
try:
RNASeq_blockIdentification.correlateClusteredGenesParameters(Guidefile,rho_cutoff=0.4,hits_cutoff=4,hits_to_report=50,ReDefinedClusterBlocks=True,filter=True)
Guidefile_block=Guidefile[:-4]+'-BlockIDs.txt'
except Exception:
Guidefile_block=Guidefile
### Filters the original expression file for the guide3 genes [returns a filename similar to NMFInput-Round1.txt]
NMFinput,Rank=NMF_Analysis.FilterGuideGeneFile(Guidefile,Guidefile_block,processedInputExpFile,iteration,platform,uniqueIDs,symbolIDs)
#NMFinput="/Volumes/Pass/ICGS2_testrun/ExpressionInput/ICGS-interim/NMFInput-Round1.txt"
try: k = int(gsp.K())
except: k = None; #print traceback.format_exc()
if k==None:
k=estimateK(NMFinput)
Rank=k*2
if Rank>2 and platform=='PSI':
Rank=30
if Rank<5 and platform!='PSI':
Rank=10
### This function prepares files for differential expression analysis (MetaDataAnalysis) and MarkerFinder
filteredInputExpFile = string.replace(processedInputExpFile,'exp.','filteredExp.')
if '-OutliersRemoved' in Guidefile:
filteredInputExpFile = string.replace(filteredInputExpFile,'.txt','-OutliersRemoved.txt')
try: NMFResult,BinarizedOutput,Metadata,Annotation=NMF_Analysis.NMFAnalysis(filteredInputExpFile,NMFinput,Rank,platform,iteration)
except:
try:
Rank=k*1.5
NMFResult,BinarizedOutput,Metadata,Annotation=NMF_Analysis.NMFAnalysis(filteredInputExpFile,NMFinput,Rank,platform,iteration)
except:
Rank=k
NMFResult,BinarizedOutput,Metadata,Annotation=NMF_Analysis.NMFAnalysis(filteredInputExpFile,NMFinput,Rank,platform,iteration)
else:
Rank=k
print "Running NMF analyses for dimension reduction using "+str(Rank)+" ranks - Round"+str(iteration)
print "The number target number of clusters (k/rank) is:",k
filteredInputExpFile = string.replace(processedInputExpFile,'exp.','filteredExp.')
if '-OutliersRemoved' in Guidefile:
filteredInputExpFile = string.replace(filteredInputExpFile,'.txt','-OutliersRemoved.txt')
try:
NMFResult,BinarizedOutput,Metadata,Annotation=NMF_Analysis.NMFAnalysis(filteredInputExpFile,NMFinput,Rank,platform,iteration)
except Exception:
"Exception, choose a lower k value."
if Rank>1:
if platform == 'PSI':
print "Identifying cluster-specific differential splicing events"
findmarkers=False
else:
print 'Identifying cell-population specific genes'
findmarkers=True
if findmarkers:
import markerFinder
### Default path for the NMF clustered groups for MarkerFinder analysis
input_exp_file=root_dir+'/NMF-SVM/ExpressionInput/exp.NMF-MarkerFinder.txt'
logTransform = False
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file,'','',''); fl.setOutputDir(root_dir)
fl.setSpecies(species); fl.setVendor("3'array")
rpkm_threshold = 0.00
fl.setRPKMThreshold(rpkm_threshold)
fl.setCorrelationDirection('up')
compendiumType = 'protein_coding'
genesToReport = 60
correlateAll = True
markerFinder.analyzeData(input_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
print 'MarkerFinder analysis complete'
#markerfile="/Volumes/Pass/Final_scicgs/ExpressionOutput/MarkerFinder/MarkerGenes_correlations-ReplicateBased.txt"
allgenesfile = root_dir+'/NMF-SVM/ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
markerfile = root_dir+'/NMF-SVM/ExpressionOutput/MarkerFinder/MarkerGenes_correlations-ReplicateBased.txt'
guides=[]
### See if any unique genes are found in a cluster before using it for SVM
guides,name,group=DetermineClusterFitness(allgenesfile,markerfile,input_exp_file,BinarizedOutput,rho_cutoff)
counter=len(group)
else:
if platform=="PSI":
rootdir,CovariateQuery=metaDataAnalysis.remoteAnalysis(species,FilteredEventAnnot,Metadata,'PSI',0.1,use_adjusted_p,0.05,Annotation)
else:
rootdir,CovariateQuery=metaDataAnalysis.remoteAnalysis(species,processedInputExpFile,Metadata,'RNASeq',0.58,use_adjusted_p,0.05,Annotation)
counter=1
Guidedir=rootdir+CovariateQuery
PSIdir=rootdir+'ExpressionProfiles'
global upd_guides
upd_guides=[]
name=[]
group=[]
for filename in os.listdir(Guidedir):
if filename.startswith("PSI."):
Guidefile=os.path.join(Guidedir, filename)
psi=string.replace(filename,"PSI.","")
if filename.startswith("GE."):
Guidefile=os.path.join(Guidedir, filename)
psi=string.replace(filename,"GE.","")
PSIfile=os.path.join(PSIdir, psi)
omitcluster=FindTopUniqueEvents(Guidefile,psi,Guidedir)
if omitcluster==0:
group.append(counter)
name.append(psi)
counter+=1
upd_guides=[x for x in upd_guides if x != ""]
if findmarkers: upd_guides=guides ### guides are only defined when MarkerFinder-based cluster fitness was evaluated
upd_guides=list(set(upd_guides))
scaling=True
grplst=[]
############ Perform SVM classification to assign individual cells to valid-NMF clusters #############
### The below analysis is performed on the down-sampled expression file
if counter>2:
output_dir = root_dir+'/NMF-SVM'
if os.path.exists(output_dir)==False:
export.createExportFolder(output_dir)
#output_file = output_dir+'/SVMInput-Round'+str(iteration)+'.txt'
#ExpandSampleClusters.filterRows(processedInputExpFile,output_file,filterDB=upd_guides,logData=False)
if scaling:
output_fil=EventAnnot
output_file=output_dir+'/SVMInput-Round'+str(iteration)+'.txt'
#output_file1 = "/Users/meenakshi/Documents/Singlecellbest/exp.exp.CD34+.v5-log2_filtered.txt"
ExpandSampleClusters.filterRows(EventAnnot,output_file,filterDB=upd_guides,logData=False)
else:
output_file = output_dir+'/SVMInput-Round'+str(iteration)+'.txt'
ExpandSampleClusters.filterRows(processedInputExpFile,output_file,filterDB=upd_guides,logData=False)
header=ExpandSampleClusters.header_file(output_file)
print "Running SVM prediction for improved subtypes - Round"+str(iteration)
### Create the training data for SVM
train,null=ExpandSampleClusters.TrainDataGeneration(output_file,BinarizedOutput,name,scaling,exports=False,rootDir=root_dir)
### Determine the medoids (use medoids for SVM but centroids for clustering)
grplst.append(group)
Expand=False ### If Expand == True, use all down-sampled cells for classification rather than medoids (similar to cellHarmony)
if Expand==True:
grplst=[]
group=ExpandSampleClusters.Findgroups(BinarizedOutput,name)
grplst.append(group)
### Perform SVM
ExpandSampleClusters.Classify(header,train,output_file,grplst,name,iteration,platform,output_dir,root_dir)
### Create a groups file for the downsampled (or original) file
groupsfile = string.replace(originalExpFile,'exp.','groups.')
groupsfile_downsampled = string.replace(processedInputExpFile,'exp.','groups.')
finalgrpfile=root_dir+"/ICGS-NMF/FinalGroups.txt"
if groupsfile_downsampled == groupsfile:
pass
else:
export.customFileCopy(finalgrpfile,groupsfile_downsampled)
export.customFileCopy(finalgrpfile,groupsfile[:-4]+'-ICGS.txt')
export.customFileCopy(finalgrpfile,groupsfile[:-4]+'-markers.txt')
from shutil import copyfile
### Don't overwrite the original groups
updated_expfile = originalExpFile[:-4]+'-ICGS.txt'
copyfile(originalExpFile, updated_expfile)
if groupsfile_downsampled == groupsfile:
processedInputExpFile = updated_expfile
groupsfile=groupsfile[:-4]+'-ICGS.txt'
### Identify markers for our final un-ordered clusters (clustering will need to be run after this)
markerFinder.analyzeData(processedInputExpFile,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
allgenesfile=root_dir+"/ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt"
markergrps,markerlst=sortFile(allgenesfile,rho_cutoff,name)
if len(markergrps)!=len(name):
allgenesfile1 = root_dir+'/NMF-SVM/ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
markergrps,markerlst=sortFile(allgenesfile1,rho_cutoff,name)
### To plot the heatmap, use the MarkerFinder genes (function pulls those genes out)
ExpandSampleClusters.filterRows(EventAnnot,processedInputExpFile[:-4]+'-markers.txt',filterDB=markerlst,logData=False) ### the processedInputExpFile is overwritten
groupsdict=grpDict(groupsfile)
SVMBinOutput=root_dir+"/NMF-SVM/SVMOutputs/round1SVC_Results_max.txt"
SVMBinOutput_t=root_dir+"/NMF-SVM/SVMOutputs/round1SVC_Results_max_t.txt"
import csv
from itertools import izip
a = izip(*csv.reader(open(SVMBinOutput,"rb"),delimiter='\t'))
csv.writer(open(SVMBinOutput_t, "wb"),delimiter='\t').writerows(a)
scaling=False ### will calculate centroids rather than medoids
centroids,centroid_heatmap_input=ExpandSampleClusters.TrainDataGeneration(processedInputExpFile[:-4]+'-markers.txt',SVMBinOutput_t,name,scaling,exports=True,rootDir=root_dir)
scaling=True
graphic_links=[]
row_method = "hopach"
column_method="hopach"
column_metric='cosine'
row_metric='correlation'
color_gradient = 'yellow_black_blue'
transpose=False
graphic_links = clustering.runHCexplicit(centroid_heatmap_input,graphic_links, row_method, row_metric, column_method,column_metric,color_gradient, transpose, display=False, Normalize=True)
NMFSVM_centroid_cluster_dir=graphic_links[0][1][:-4]+'.txt'
outputDir = root_dir+"/NMF-SVM/SVMOutputs"
header=ExpandSampleClusters.header_file(NMFinput)
status,graphic_links2=generateMarkerheatmap(processedInputExpFile[:-4]+'-markers.txt',output_file,NMFSVM_centroid_cluster_dir,groupsdict,markergrps,header,outputDir,root_dir,species,uniqueIDs)
import shutil
if status=='not-subsampled':
NMFSVM_centroid_cluster_graphics_dir=graphic_links2[0][1][:-4]
NMFSVM_centroid_cluster_dir=graphic_links2[0][0][:-4]
shutil.copy(NMFSVM_centroid_cluster_dir+'.txt',root_dir+"/ICGS-NMF/FinalMarkerHeatmap.txt")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir+'.png',root_dir+"/ICGS-NMF/FinalMarkerHeatmap.png")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir+'.pdf',root_dir+"/ICGS-NMF/FinalMarkerHeatmap.pdf")
shutil.copy(allgenesfile,root_dir+"/ICGS-NMF/MarkerGenes.txt")
final_exp_file = root_dir+"/ICGS-NMF/FinalMarkerHeatmap.txt"
else:
NMFSVM_centroid_cluster_graphics_dir=graphic_links2[0][1][:-4]
NMFSVM_centroid_cluster_dir=graphic_links2[0][0][:-4]
NMFSVM_centroid_cluster_graphics_dir2=graphic_links2[1][1][:-4]
NMFSVM_centroid_cluster_dir2=graphic_links2[1][0][:-4]
NMFSVM_centroid_cluster_dir=graphic_links2[0][0][:-4]
NMFSVM_centroid_cluster_dir1=graphic_links2[1][0][:-4]
shutil.copy(NMFSVM_centroid_cluster_dir+'.txt',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_sampled.txt")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir+'.png',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_sampled.png")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir+'.pdf',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_sampled.pdf")
shutil.copy(NMFSVM_centroid_cluster_dir2+'.txt',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_all.txt")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir2+'.png',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_all.png")
shutil.copy(NMFSVM_centroid_cluster_graphics_dir2+'.pdf',root_dir+"/ICGS-NMF/FinalMarkerHeatmap_all.pdf")
shutil.copy(allgenesfile,root_dir+"/ICGS-NMF/MarkerGenes.txt")
final_exp_file = root_dir+"/ICGS-NMF/FinalMarkerHeatmap_all.txt"
try:
### Build cell-type annotation FinalGroups file
goelite_path = export.findParentDir(NMFSVM_centroid_cluster_dir)[:-1]+'/GO-Elite/clustering/'+export.findFilename(NMFSVM_centroid_cluster_dir)+'/GO-Elite_results/pruned-results_z-score_elite.txt'
annotatedGroupsFile = RNASeq.predictCellTypesFromClusters(finalgrpfile, goelite_path)
group_alt = clustering.remoteAssignGroupColors(annotatedGroupsFile)
except:
print traceback.format_exc()
print 'Unable to export annotated groups file with predicted cell type names.'
group_alt=None
### Moved UMAP generation to the end (so the coordinates are the final coordinates and the user can review results earlier)
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(final_exp_file,geneFilter=markerlst)
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
#matrix = map(np.array, zip(*matrix)) ### converts these to tuples
#column_header, row_header = row_header, column_header
finalOutputDir=root_dir+"/ICGS-NMF/"
#clustering.tSNE(np.array(matrix),column_header,dataset_name,group_db,display=False,showLabels=False,species=species,reimportModelScores=False)
try:
clustering.runUMAP(np.array(matrix),column_header,dataset_name,group_db,display=False, group_alt=group_alt,
showLabels=False,species=species,reimportModelScores=False,rootDir=root_dir,finalOutputDir=finalOutputDir)
except:
print traceback.format_exc()
"""
clustering.tSNE(processedInputExpFile,group_db=groupsdict,display=True,showLabels=False,row_header=None,colorByGene=None,species=None,reimportModelScores=False)
### MV need to do
Orderedfile,groupsdict=FindcentroidGroups(filtered,groupfile)
"""
### write final groups ordered
#exportGroups(root_dir+"/ICGS-NMF/FinalMarkerHeatmap.txt",root_dir+"/ICGS-NMF/FinalGroups.txt",platform)
if scaling:
flag=False
return flag,processedInputExpFile,EventAnnot,graphic_links3+graphic_links2
header=Correlationdepletion.header_file(NMFResult)
output_file=output_dir+'/DepletionInput-Round'+str(iteration)+".txt"
sampleIndexSelection.filterFile(processedInputExpFile[:-4]+'-markers.txt',output_file,header)
print "Running Correlation Depletion - Round"+str(iteration)
commonkeys,count=Correlationdepletion.FindCorrelations(NMFResult,output_file,name)
Depleted=Correlationdepletion.DepleteSplicingevents(commonkeys,output_file,count,processedInputExpFile)
processedInputExpFile=Depleted
flag=True
else:
print "No groups found!!! Re-analyze the data with a small k"
"""### Commented out in version 2.1.14
if iteration<2:
gsp.setK(2)
iteration=1
flag,processedInputExpFile,inputExpFile,graphic_links3=CompleteICGSWorkflow(root_dir,processedInputExpFile,originalExpFile,iteration,gsp.RhoCutoff(),dynamicCorrelation,platform,species,scaling,gsp)
try:
print "Running K-means analyses instead of NMF - Round"+str(iteration)
print "Extremely sparse data!! choose a small k"
header=[]
header=Kmeans.header_file(Guidefile_block)
Kmeans.KmeansAnalysis(Guidefile_block,header,processedInputExpFile,iteration)
flag=False
except Exception:
flag=False
"""
flag=False
else:
if Rank==1:
try:
print "Running K-means analyses instead of NMF - Round"+str(iteration)
print "Extremely sparse data!! choose a small k"
header=[]
header=Kmeans.header_file(Guidefile_block)
Kmeans.KmeansAnalysis(Guidefile_block,header,processedInputExpFile,iteration)
flag=False
except Exception:
flag=False
else:
flag=False
return flag,processedInputExpFile,EventAnnot,graphic_links3
except:
print traceback.format_exc()
print 'WARNING!!!! Error encountered in the NMF ICGS analysis... See the above report.'
flag=False
return flag,processedInputExpFile,EventAnnot,graphic_links3
def exportGroups(cluster_file,outdir,platform):
lineNum=1
for line in open(cluster_file,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if lineNum==1: names = t[2:]; lineNum+=1
elif lineNum==2: clusters = t[2:]; lineNum+=1
else: break
out_obj = export.ExportFile(outdir)
for name in names:
cluster = clusters[names.index(name)]
if platform == 'RNASeq':
if 'junction_quantification' not in name and '.bed' not in name:
name = name+'.bed'
elif 'junction_quantification.txt' not in name and '.txt' not in name and '.bed' not in name:
name = name+'.txt'
if ':' in name:
group,name = string.split(name,':')
if cluster=='NA': cluster = group
out_obj.write(name+'\t'+cluster+'\t'+cluster+'\n')
out_obj.close()
def runICGS_NMF(inputExpFile,scaling,platform,species,gsp,enrichmentInput='',dynamicCorrelation=True):
""" Export the filtered expression file then run downsampling analysis and prepares files for ICGS. After running ICGS, peform enrichment analyses """
try: downsample_cutoff = gsp.DownSample()
except: downsample_cutoff = 2500
try: numVarGenes = gsp.NumVarGenes()
except: numVarGenes = 500
print 'DownSample threshold =',downsample_cutoff, 'cells'
try: data_format = string.lower(gsp.CountsNormalization())
except: data_format = 'scaled'
### Scale and log2 normalize a counts expression file
if 'count' in data_format:
print 'Scaling counts as column normalized log2 values.',
from import_scripts import CountsNormalize
inputExpFile = CountsNormalize.normalizeDropSeqCountsMemoryEfficient(inputExpFile)
print 'Filtering the expression dataset (be patient).',
print_out, inputExpFile = RNASeq.singleCellRNASeqWorkflow(species,platform,inputExpFile,mlp,rpkm_threshold=0,parameters=gsp,reportOnly=True)
print 'Running ICGS-NMF'
### Find the parent dir of the output directory (expression file from the GUI will be stored in the output dir [ExpressionInput])
root_dir = export.findParentDir(inputExpFile)[:-1]
if 'ExpressionInput' in inputExpFile:
root_dir = export.findParentDir(root_dir)
exp_file_name = export.findFilename(inputExpFile)
### Assign the expression filename (for single-cell RNA-Seq rather than splicing)
if 'exp.' not in exp_file_name:
exp_file_name = 'exp.' + exp_file_name
########## Perform Downsampling for large datasets ##########
### Use dispersion (variance by mean) to define initial variable genes
inputExpFileVariableGenesDir,n=hgvfinder(inputExpFile,numVarGenes=numVarGenes) ### returns filtered expression file with 500 variable genes
if n>downsample_cutoff and scaling:
if n>15000: ### For extremely large datasets, Louvain is used as a preliminary downsampling before PageRank
print 'Performing Community Clustering...'
inputExpFileScaled=inputExpFile[:-4]+'-Louvain-downsampled.txt'
### Louvain clustering for down-sampling from >25,000 to 10,000 cells
sampmark=community_sampling(inputExpFileVariableGenesDir,downsample_cutoff) ### returns list of Louvain downsampled cells
### Filter the original expression file using these downsampled cells
sampleIndexSelection.filterFile(inputExpFile,inputExpFileScaled,sampmark)
### Use dispersion (variance by mean) to define post-Louvain selected cell variable genes
inputExpFileVariableGenesDir,n=hgvfinder(inputExpFileScaled,numVarGenes=numVarGenes) ### returns filtered expression file with 500 variable genes
### Run PageRank on the Louvain/dispersion downsampled dataset
sampmark=PageRankSampling(inputExpFileVariableGenesDir,downsample_cutoff)
else:
### Directly run PageRank on the initial dispersion based dataset
sampmark=PageRankSampling(inputExpFileVariableGenesDir,downsample_cutoff)
### Write out final downsampled results to a new file
output_dir = root_dir+'/ExpressionInput'
try: export.createExportFolder(output_dir)
except: pass ### Already exists
processedInputExpFile = root_dir+'/ExpressionInput/'+exp_file_name[:-4]+'-PageRank-downsampled.txt' ### down-sampled file
sampleIndexSelection.filterFile(inputExpFile,processedInputExpFile,sampmark)
else:
output_dir = root_dir+'/ExpressionInput'
try: export.createExportFolder(output_dir)
except: pass ### Already exists
if platform == 'PSI':
### The PSI file name by default is not informative
processedInputExpFile=output_dir+"/exp.spliceICGS-input.txt"
export.customFileCopy(inputExpFile,processedInputExpFile)
elif 'ExpressionInput' not in inputExpFile:
processedInputExpFile = root_dir+'/'+exp_file_name
export.customFileCopy(inputExpFile,processedInputExpFile)
else: processedInputExpFile = inputExpFile
flag=True
iteration=1 ### Always equal to 1 for scRNA-Seq but can increment for splice-ICGS
### Recursively run ICGS with NMF
flag,processedInputExpFile,inputExpFile,graphic_links3=CompleteICGSWorkflow(root_dir,processedInputExpFile,
inputExpFile,iteration,gsp.RhoCutoff(),dynamicCorrelation,platform,species,scaling,gsp)
if platform == 'PSI':
output_dir = root_dir+'/SVMOutputs'
Combinedres=MergeResults(output_dir)
mutlabels={}
if enrichmentInput!='':
print "Running Mutation Enrichment Analyses"
Expand="yes"
mutdict=defaultdict(list)
header=ME.header_file(enrichmentInput)
mutdict=ME.findsiggenepermut(enrichmentInput)
mutlabels=ME.Enrichment(Combinedres,mutdict,enrichmentInput,Expand,header)
if platform == 'PSI':
print "Generating the final consolidated results"
Orderedheatmap.Classify(Combinedres,mutlabels,output_dir) ### output_dir: the SVMOutputs directory used by MergeResults above
Orderedheatmap.Classify(Combinedres,mutlabels,output_dir,False)
print "successfully completed"
return graphic_links3
if __name__ == '__main__':
"""
processedInputExpFile="/Volumes/Pass/ICGS2_testrun/ExpressionInput/exp.input.txt"
matrix, column_header, row_header, dataset_name, group_db =clustering.importData(processedInputExpFile)
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
#matrix = map(np.array, zip(*matrix)) ### converts these to tuples
#column_header, row_header = row_header, column_header
directory=export.findParentDir(export.findParentDir(processedInputExpFile)[:-1])+"ICGS-NMF/"
#clustering.tSNE(np.array(matrix),column_header,dataset_name,group_db,display=False,showLabels=False,species="Hs",reimportModelScores=False)
clustering.umap(np.array(matrix),column_header,dataset_name,group_db,display=False,showLabels=False,species="Hs",reimportModelScores=False,directory=directory)
sys.exit()
"""
import getopt
rho_cutoff=0.2
dynamicCorrelation="optimize"
Mutationref=""
platform="RNASeq"
scaling=True
species="Hs"
row_method = 'hopach'
column_method = 'hopach'
row_metric = 'correlation'
column_metric = 'euclidean'
color_gradient = 'yellow_black_blue'
contrast=3
vendor = "RNASeq"
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = False
restrictBy = 'protein_coding'
#restrictBy = 'None'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 0
CountsCutoff = 0
if platform=="PSI":
FoldDiff = 1.2
else:
FoldDiff=4.0
ExpressionCutoff = 1
SamplesDiffering = 4
JustShowTheseIDs=''
removeOutliers = False
PathwaySelection=[]
array_type="RNASeq"
rho_cutoff=rho_cutoff
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
options, remainder = getopt.getopt(sys.argv[1:],'', ['Input=','rho=','dynamicCorrelation=','Mutationref=','platform=','scaling=','species=','ExpressionCutoff=','CountsCutoff=','FoldDiff=','SamplesDiffering=','removeOutliers=','featurestoEvaluate=','restrictBy=','excludeCellCycle=','column_metric=','column_method=','row_method=','row_metric='])
for opt, arg in options:
#if opt == '--processedInputExpFile': processedInputExpFile=arg
if opt=='--Input':EventAnnot=arg # input file
if opt=='--rho':rho_cutoff=arg # rho cutoff
if opt=='--dynamicCorrelation':
if string.lower(arg) == 'true' or string.lower(arg) == 'optimize':
dynamicCorrelation=True # 'true'/'optimize': iteratively optimize the correlation cutoff
else:
dynamicCorrelation=False # otherwise use the provided correlation cutoff as a constant
if opt=='--Mutationref':Mutationref=arg #reference file provided for enrichment (format groups file)
if opt=='--platform':platform=arg
if opt=='--scaling':scaling=arg # True to scale for large datasets, False run with all samples
if opt=='--species':species=arg
if opt=='--ExpressionCutoff':ExpressionCutoff=arg
if opt=='--CountsCutoff':CountsCutoff=arg
if opt=='--FoldDiff':FoldDiff=arg
if opt=='--SamplesDiffering':SamplesDiffering=arg
if opt=='--removeOutliers':removeOutliers=arg
if opt=='--featurestoEvaluate':featurestoEvaluate=arg
if opt=='--restrictBy':restrictBy=arg
if opt=='--column_metric':column_metric=arg
if opt=='--column_method':column_method=arg
if opt=='--row_method':row_method=arg
if opt=='--row_metric':row_metric=arg
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(JustShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
runICGS_NMF(EventAnnot,scaling,platform,species,gsp,enrichmentInput=Mutationref,dynamicCorrelation=dynamicCorrelation)
/CLAM-3.2.6.tar.gz/CLAM-3.2.6/clam/clients/textstats.py |
import sys
import os
import time
import glob
import random
import codecs
#We may need to do some path magic in order to find the clam.* imports
sys.path.append(sys.path[0] + '/../../')
os.environ['PYTHONPATH'] = sys.path[0] + '/../../'
#Import the CLAM Client API and CLAM Data API and other dependencies
from clam.common.client import *
from clam.common.data import *
from clam.common.formats import *
import clam.common.status
url = None
createlexicon = False
parameters = {}
files = []
#Process arguments and parameters:
for arg in sys.argv[1:]:
if arg[0:7] == "http://":
url = arg
elif arg[0] == '-':
if arg == '-l':
createlexicon = True
elif os.path.isfile(arg):
files.append(arg)
elif os.path.isdir(arg):
files += [ x for x in glob.glob(arg + '/*') if x[0] != '.' ]
else:
print("Unknown argument, or file/directory does not exist: " + arg,file=sys.stderr)
print("Syntax: textstats.py [OPTIONS] URL TEXTFILES",file=sys.stderr)
print("Options: -l\tCreate lexicon",file=sys.stderr)
sys.exit(2)
if not url or not files:
print("Syntax: textstats.py [OPTIONS] URL TEXTFILES",file=sys.stderr)
sys.exit(1)
print("Connecting to server...")
#create client, connect to server, url is the full URL to the base of your webservice.
clamclient = CLAMClient(url)
print("Creating project...")
#this is the name of our project; it consists in part of randomly generated bits (so multiple clients don't use the same project and can run simultaneously)
#CLAM works with 'projects', for automated clients it usually suffices to create a temporary project,
#which we explicitly delete when we're all done. Each client obviously needs its own project, so we
#create a project with a random name:
project = "textstatsclient" + str(random.getrandbits(64))
#Now we call the webservice and create the project
clamclient.create(project)
#get project status and specification
data = clamclient.get(project)
print("Uploading Files...")
#Upload the files (names were passed on the command line) to the webservice, always indicating
#the format.
for f in files:
print("\tUploading " + f + " to webservice...")
#This invokes the actual upload
#TODO: Adapt
clamclient.addinputfile(project, data.inputtemplate('textinput'), f, language='en')
print("Starting...")
#Now we invoke the webservice with the parameters that were passed on the command line, effectively
#starting the project. The start() method takes a project name and a set of keyword arguments, the keywords here
#correspond with the parameter IDs defined by your webservice.
data = clamclient.start(project, createlexicon=createlexicon) #start the process with the specified parameters
#Always check for parameter errors! Don't just assume everything went well! Use startsafe instead of start
#to simply raise exceptions on parameter errors.
if data.errors:
print("An error occured: " + data.errormsg,file=sys.stderr)
for parametergroup, paramlist in data.parameters:
for parameter in paramlist:
if parameter.error:
print("Error in parameter " + parameter.id + ": " + parameter.error,file=sys.stderr)
clamclient.delete(project) #delete our project (remember, it was temporary, otherwise clients would leave a mess)
sys.exit(1)
#If everything went well, the system is now running, we simply wait until it is done and retrieve the status in the meantime
while data.status != clam.common.status.DONE:
time.sleep(5) #wait 5 seconds before polling status
data = clamclient.get(project) #get status again
print("\tPROJECT IS RUNNING: " + str(data.completion) + '% -- ' + data.statusmessage)
#Good, all is done! We should have some output...
print("All done.")
#Download all output files to current directory
for outputfile in data.output:
print("\tDownloading " + str(outputfile) + " ...")
outputfile.copy(os.path.basename(str(outputfile)))
#delete our project (remember, it was temporary, otherwise clients would leave a mess)
clamclient.delete(project)
print("All done! Have a nice day!") | PypiClean |
/JBS_dsnd_distributions-0.1.tar.gz/JBS_dsnd_distributions-0.1/JBS_dsnd_distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
k (int): number of successes for which to calculate the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
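# Minimal usage sketch (comments only; this module uses a relative import, so it is
# assumed to be imported from the JBS_dsnd_distributions package rather than run directly;
# the numbers are purely illustrative):
#
# from JBS_dsnd_distributions.Binomialdistribution import Binomial
# binomial = Binomial(prob=0.4, size=25)
# print(binomial) # mean 10.0, standard deviation ~2.449, p 0.4, n 25
# print(binomial.pdf(10)) # probability of exactly 10 successes out of 25 trials
# combined = binomial + Binomial(prob=0.4, size=10) # p values must be equal
# print(combined.n) # 35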
/Dabo-0.9.16.tar.gz/Dabo-0.9.16/dabo/ui/uiwx/dHyperLink.py | import wx
import wx.lib.hyperlink as hyperlink
import dabo
from dabo.dLocalize import _
from dabo.ui import makeDynamicProperty
if __name__ == "__main__":
dabo.ui.loadUI("wx")
import dControlMixin as dcm
import dabo.dEvents as dEvents
import dabo.dColors as dColors
from alignmentMixin import AlignmentMixin
class dHyperLink(dcm.dControlMixin, AlignmentMixin, hyperlink.HyperLinkCtrl):
"""
Creates a hyperlink that, when clicked, launches the specified
URL in the user's default browser, or raises a Hit event for your
code to catch and take the appropriate action.
"""
def __init__(self, parent, properties=None, attProperties=None,
*args, **kwargs):
self._baseClass = dHyperLink
preClass = hyperlink.HyperLinkCtrl
dcm.dControlMixin.__init__(self, preClass, parent, properties=properties,
attProperties=attProperties, *args, **kwargs)
# Make the rollover effect the default, unless it was specified as False.
self.ShowHover = self.ShowHover
self.Bind(hyperlink.EVT_HYPERLINK_LEFT, self._onWxHit) ## only called if ShowInBrowser False
self.DoPopup(False)
def onResize(self, evt):
if self.Application.Platform == "Win":
self.refresh()
def refresh(self):
super(dHyperLink, self).refresh()
self.UpdateLink(True)
def _setColors(self):
"""Updated the link with the specified colors."""
lc, vc, rc = self.LinkColor, self.VisitedColor, self.HoverColor
if isinstance(lc, basestring):
lc = dColors.colorTupleFromName(lc)
if isinstance(vc, basestring):
vc = dColors.colorTupleFromName(vc)
if isinstance(rc, basestring):
rc = dColors.colorTupleFromName(rc)
self.SetColours(lc, vc, rc)
self.UpdateLink(True)
def _getUnderlines(self, which):
"""Returns the status for the various underline settings."""
types = ("link", "hover", "visited")
return self.GetUnderlines()[types.index(which)]
def _setUnderlines(self, link, visited, hover):
"""Updated the link with the specified underline settings."""
self.SetUnderlines(link, visited, hover)
self.UpdateLink(True)
def _getShowInBrowser(self):
return getattr(self, "_showInBrowser", True)
def _setShowInBrowser(self, val):
if self._constructed():
self._showInBrowser = bool(val)
self.AutoBrowse(val)
else:
self._properties["ShowInBrowser"] = val
def _getHoverColor(self):
return getattr(self, "_hoverColor", self.GetColours()[2])
def _setHoverColor(self, val):
if self._constructed():
if val != self.HoverColor:
self._hoverColor = val
self._setColors()
else:
self._properties["HoverColor"] = val
def _getHoverUnderline(self):
return self._getUnderlines("hover")
def _setHoverUnderline(self, val):
if self._constructed():
if val != self.HoverUnderline:
self._setUnderlines(self.LinkUnderline, self.VisitedUnderline, bool(val))
else:
self._properties["HoverUnderline"] = val
def _getLinkColor(self):
return getattr(self, "_linkColor", self.GetColours()[0])
def _setLinkColor(self, val):
if self._constructed():
if val != self.LinkColor:
self._linkColor = val
self._setColors()
else:
self._properties["LinkColor"] = val
def _getLinkUnderline(self):
return self._getUnderlines("link")
def _setLinkUnderline(self, val):
if self._constructed():
if val != self.LinkUnderline:
self._setUnderlines(bool(val), self.VisitedUnderline, self.HoverUnderline)
else:
self._properties["LinkUnderline"] = val
def _getShowHover(self):
return getattr(self, "_showHover", True)
def _setShowHover(self, val):
if self._constructed():
self._showHover = bool(val)
self.EnableRollover(val)
else:
self._properties["ShowHover"] = val
def _getURL(self):
return self.GetURL()
def _setURL(self, val):
self.SetURL(val)
def _getVisited(self):
return self.GetVisited()
def _setVisited(self, val):
self.SetVisited(val)
self.UpdateLink(True)
def _getVisitedColor(self):
return getattr(self, "_visitedColor", self.GetColours()[1])
def _setVisitedColor(self, val):
if self._constructed():
if val != self.VisitedColor:
self._visitedColor = val
self._setColors()
else:
self._properties["VisitedColor"] = val
def _getVisitedUnderline(self):
return self._getUnderlines("visited")
def _setVisitedUnderline(self, val):
if self._constructed():
if val != self.VisitedUnderline:
self._setUnderlines(self.LinkUnderline, bool(val), self.HoverUnderline)
else:
self._properties["VisitedUnderline"] = val
ShowInBrowser = property(_getShowInBrowser, _setShowInBrowser, None,
_("""Specifies the behavior of clicking on the hyperlink:
True: open URL in user's default web browser (default)
False: raise Hit event for your code to handle"""))
HoverColor = property(_getHoverColor, _setHoverColor, None,
_("Color of the link when the mouse passes over it. (str or tuple)"))
HoverUnderline = property(_getHoverUnderline, _setHoverUnderline, None,
_("Is the link underlined when the mouse passes over it? (bool)"))
LinkColor = property(_getLinkColor, _setLinkColor, None,
_("Normal (unvisited) link text color. (str or tuple)"))
LinkUnderline = property(_getLinkUnderline, _setLinkUnderline, None,
_("Is the link underlined in the normal state? (bool)"))
ShowHover = property(_getShowHover, _setShowHover, None,
_("Does the link show the hover effect? (bool)"))
URL = property(_getURL, _setURL, None,
_("URL for this link (str)"))
Visited = property(_getVisited, _setVisited, None,
_("Has this link been visited? (bool)"))
VisitedColor = property(_getVisitedColor, _setVisitedColor, None,
_("Color of visited links (str or tuple)"))
VisitedUnderline = property(_getVisitedUnderline, _setVisitedUnderline, None,
_("Is the link underlined in the visited state? (bool)"))
ForeColor = LinkColor
class _dHyperLink_test(dHyperLink):
def _onHit(self, evt):
print "hit"
def afterInit(self):
self.Caption = "The Dabo Wiki"
self.FontSize = 24
self.URL = "http://dabodev.com/wiki/"
self.LinkColor = "olive"
self.VisitedColor = "maroon"
self.HoverColor = "crimson"
self.LinkUnderline = True
self.HoverUnderline = False
self.VisitedUnderline = True
self.bindEvent(dabo.dEvents.Hit, self._onHit)
#self.ShowInBrowser = False
if __name__ == "__main__":
import test
test.Test().runTest(_dHyperLink_test) | PypiClean |
/FMPy-0.3.11-py3-none-any.whl/fmpy/gui/MainWindow.py | from os.path import dirname
try:
from . import compile_resources
compile_resources()
except Exception as e:
print("Failed to compiled resources. %s" % e)
import os
import sys
from PyQt5.QtCore import QCoreApplication, QDir, Qt, pyqtSignal, QUrl, QSettings, QPoint, QTimer, QStandardPaths, \
QPointF, QBuffer, QIODevice
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLineEdit, QComboBox, QFileDialog, QLabel, QVBoxLayout, QMenu, QMessageBox, QProgressDialog, QProgressBar, QDialog, QGraphicsScene, QGraphicsItemGroup, QGraphicsRectItem, QGraphicsTextItem, QGraphicsPathItem
from PyQt5.QtGui import QDesktopServices, QPixmap, QIcon, QDoubleValidator, QColor, QFont, QPen, QFontMetricsF, QPolygonF, QPainterPath
from fmpy.gui.generated.MainWindow import Ui_MainWindow
import fmpy
from fmpy import read_model_description, supported_platforms, platform
from fmpy.model_description import ScalarVariable
from fmpy.util import can_simulate
from fmpy.gui.model import VariablesTableModel, VariablesTreeModel, VariablesModel, VariablesFilterModel
from fmpy.gui.log import Log, LogMessagesFilterProxyModel
QCoreApplication.setApplicationVersion(fmpy.__version__)
QCoreApplication.setOrganizationName("CATIA-Systems")
QCoreApplication.setApplicationName("FMPy")
import pyqtgraph as pg
pg.setConfigOptions(background='w', foreground='k', antialias=True)
COLLAPSABLE_COLUMNS = ['Type', 'Value Reference', 'Initial', 'Causality', 'Variability', 'Min', 'Max']
class ClickableLabel(QLabel):
""" A QLabel that shows a pointing hand cursor and emits a *clicked* event when clicked """
clicked = pyqtSignal()
def __init__(self, parent=None):
super(ClickableLabel, self).__init__(parent)
self.setCursor(Qt.PointingHandCursor)
def mousePressEvent(self, ev):
self.clicked.emit()
super(ClickableLabel, self).mousePressEvent(ev)
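# Minimal usage sketch (comments only, assuming a parent widget exists):
#
# label = ClickableLabel(parent)
# label.setText("Show log")
# label.clicked.connect(lambda: print("label clicked"))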
class AboutDialog(QDialog):
def __init__(self, parent=None):
super(AboutDialog, self).__init__(parent)
from .generated.AboutDialog import Ui_Dialog
from .. import __version__, platform, __file__
import sys
import os
self.ui = Ui_Dialog()
self.ui.setupUi(self)
# hide the question mark button
flags = self.windowFlags()
flags &= ~Qt.WindowContextHelpButtonHint
flags |= Qt.MSWindowsFixedSizeDialogHint
self.setWindowFlags(flags)
self.ui.fmpyVersionLabel.setText(__version__)
self.ui.fmiPlatformLabel.setText(platform)
self.ui.installationPathLabel.setText(os.path.dirname(__file__))
self.ui.pythonInterpreterLabel.setText(sys.executable)
self.ui.pythonVersionLabel.setText(sys.version)
class MainWindow(QMainWindow):
variableSelected = pyqtSignal(ScalarVariable, name='variableSelected')
variableDeselected = pyqtSignal(ScalarVariable, name='variableDeselected')
windows = []
windowOffset = QPoint()
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
# save from garbage collection
self.windows.append(self)
# state
self.filename = None
self.result = None
self.modelDescription = None
self.variables = dict()
self.selectedVariables = set()
self.startValues = dict()
self.simulationThread = None
# self.progressDialog = None
self.plotUpdateTimer = QTimer(self)
self.plotUpdateTimer.timeout.connect(self.updatePlotData)
self.curves = []
# UI
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.showColumnActions = {}
# use a smaller default font size on Mac and Linux
if sys.platform in ['darwin', 'linux']:
defaultFont = QFont()
defaultFont.setPixelSize(11)
QApplication.setFont(defaultFont)
self.setStyleSheet("QWidget { font-size: 11px; }")
self.ui.treeView.setAttribute(Qt.WA_MacShowFocusRect, False)
self.ui.tableView.setAttribute(Qt.WA_MacShowFocusRect, False)
self.ui.logTreeView.setAttribute(Qt.WA_MacShowFocusRect, False)
# set the window size to 85% of the available space
geo = QApplication.desktop().availableGeometry()
width = min(geo.width() * 0.85, 1100.0)
height = min(geo.height() * 0.85, 900.0)
self.resize(int(width), int(height))
# hide the variables
self.ui.dockWidget.hide()
# toolbar
self.stopTimeLineEdit = QLineEdit("1")
self.stopTimeLineEdit.setToolTip("Stop time")
self.stopTimeLineEdit.setFixedWidth(50)
self.stopTimeValidator = QDoubleValidator(self)
self.stopTimeValidator.setBottom(0)
self.stopTimeLineEdit.setValidator(self.stopTimeValidator)
self.ui.toolBar.addWidget(self.stopTimeLineEdit)
spacer = QWidget(self)
spacer.setFixedWidth(10)
self.ui.toolBar.addWidget(spacer)
self.fmiTypeComboBox = QComboBox(self)
self.fmiTypeComboBox.addItem("Co-Simulation")
self.fmiTypeComboBox.setToolTip("FMI type")
self.fmiTypeComboBox.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.ui.toolBar.addWidget(self.fmiTypeComboBox)
# disable widgets
self.ui.actionLoadStartValues.setEnabled(False)
self.ui.actionReload.setEnabled(False)
self.ui.actionSettings.setEnabled(False)
self.ui.actionShowLog.setEnabled(False)
self.ui.actionShowResults.setEnabled(False)
self.ui.actionSimulate.setEnabled(False)
self.ui.actionSaveResult.setEnabled(False)
self.ui.actionSavePlottedResult.setEnabled(False)
self.stopTimeLineEdit.setEnabled(False)
self.fmiTypeComboBox.setEnabled(False)
# hide the dock's title bar
self.ui.dockWidget.setTitleBarWidget(QWidget())
self.ui.dockWidgetContents.setMinimumWidth(500)
self.tableModel = VariablesTableModel(self.selectedVariables, self.startValues)
self.tableFilterModel = VariablesFilterModel()
self.tableFilterModel.setSourceModel(self.tableModel)
self.tableFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.ui.tableView.setModel(self.tableFilterModel)
self.treeModel = VariablesTreeModel(self.selectedVariables, self.startValues)
self.treeFilterModel = VariablesFilterModel()
self.treeFilterModel.setSourceModel(self.treeModel)
self.treeFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.ui.treeView.setModel(self.treeFilterModel)
for i, (w, n) in enumerate(zip(VariablesModel.COLUMN_WIDTHS, VariablesModel.COLUMN_NAMES)):
self.ui.treeView.setColumnWidth(i, w)
self.ui.tableView.setColumnWidth(i, w)
self.hideAllColumns()
# populate the recent files list
settings = QSettings()
recent_files = settings.value("recentFiles", defaultValue=[])
recent_files = self.removeDuplicates(recent_files)
vbox = QVBoxLayout()
if recent_files:
added = set()
for file in recent_files[:5]:
link = QLabel('<a href="%s" style="text-decoration: none">%s</a>' % (file, os.path.basename(file)))
link.setToolTip(file)
link.linkActivated.connect(self.load)
vbox.addWidget(link)
added.add(file)
self.ui.recentFilesGroupBox.setLayout(vbox)
self.ui.recentFilesGroupBox.setVisible(len(recent_files) > 0)
# settings page
self.inputFileMenu = QMenu()
self.inputFileMenu.addAction("New input file...", self.createInputFile)
self.inputFileMenu.addSeparator()
self.inputFileMenu.addAction("Show in Explorer", self.showInputFileInExplorer)
self.inputFileMenu.addAction("Open in default application", self.openInputFile)
self.ui.selectInputButton.setMenu(self.inputFileMenu)
# log page
self.log = Log(self)
self.logFilterModel = LogMessagesFilterProxyModel(self)
self.logFilterModel.setSourceModel(self.log)
self.logFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.ui.logTreeView.setModel(self.logFilterModel)
self.ui.clearLogButton.clicked.connect(self.log.clear)
self.log.numberOfDebugMessagesChanged.connect(lambda n: self.ui.showDebugMessagesButton.setText(str(n)))
self.log.numberOfInfoMessagesChanged.connect(lambda n: self.ui.showInfoMessagesButton.setText(str(n)))
self.log.numberOfWarningMessagesChanged.connect(lambda n: self.ui.showWarningMessagesButton.setText(str(n)))
self.log.numberOfErrorMessagesChanged.connect(lambda n: self.ui.showErrorMessagesButton.setText(str(n)))
self.ui.logFilterLineEdit.textChanged.connect(self.logFilterModel.setFilterFixedString)
self.ui.showDebugMessagesButton.toggled.connect(self.logFilterModel.setShowDebugMessages)
self.ui.showInfoMessagesButton.toggled.connect(self.logFilterModel.setShowInfoMessages)
self.ui.showWarningMessagesButton.toggled.connect(self.logFilterModel.setShowWarningMessages)
self.ui.showErrorMessagesButton.toggled.connect(self.logFilterModel.setShowErrorMessages)
# context menu
self.contextMenu = QMenu()
self.actionExpandAll = self.contextMenu.addAction("Expand all")
self.actionExpandAll.triggered.connect(self.ui.treeView.expandAll)
self.actionCollapseAll = self.contextMenu.addAction("Collapse all")
self.actionCollapseAll.triggered.connect(self.ui.treeView.collapseAll)
self.contextMenu.addSeparator()
self.actionCopyVariableName = self.contextMenu.addAction("Copy Variable Name", self.copyVariableName)
self.actionCopyValueReference = self.contextMenu.addAction("Copy Value Reference", self.copyValueReference)
self.contextMenu.addSeparator()
self.actionEditTable = self.contextMenu.addAction("Edit Table", self.editTable)
self.contextMenu.addSeparator()
self.columnsMenu = self.contextMenu.addMenu('Columns')
action = self.columnsMenu.addAction('Show All')
action.triggered.connect(self.showAllColumns)
action = self.columnsMenu.addAction('Hide All')
action.triggered.connect(self.hideAllColumns)
self.columnsMenu.addSeparator()
for column in COLLAPSABLE_COLUMNS:
action = self.columnsMenu.addAction(column)
action.setCheckable(True)
action.toggled.connect(lambda show, col=column: self.showColumn(col, show))
self.showColumnActions[column] = action
self.contextMenu.addSeparator()
self.actionClearPlots = self.contextMenu.addAction("Clear Plots", self.clearPlots)
# file menu
self.ui.actionExit.triggered.connect(QApplication.closeAllWindows)
self.ui.actionLoadStartValues.triggered.connect(self.loadStartValues)
self.ui.actionReload.triggered.connect(lambda: self.load(self.filename))
self.ui.actionSaveChanges.triggered.connect(self.saveChanges)
# tools menu
self.ui.actionValidateFMU.triggered.connect(self.validateFMU)
self.ui.actionCompileDarwinBinary.triggered.connect(lambda: self.compilePlatformBinary('darwin64'))
self.ui.actionCompileLinuxBinary.triggered.connect(lambda: self.compilePlatformBinary('linux64'))
self.ui.actionCompileWin32Binary.triggered.connect(lambda: self.compilePlatformBinary('win32'))
self.ui.actionCompileWin64Binary.triggered.connect(lambda: self.compilePlatformBinary('win64'))
self.ui.actionCreateJupyterNotebook.triggered.connect(self.createJupyterNotebook)
self.ui.actionCreateCMakeProject.triggered.connect(self.createCMakeProject)
self.ui.actionAddWindows32Remoting.triggered.connect(lambda: self.addRemotingBinaries('win64', 'win32'))
self.ui.actionAddLinux64Remoting.triggered.connect(lambda: self.addRemotingBinaries('linux64', 'win64'))
self.ui.actionAddCoSimulationWrapper.triggered.connect(self.addCoSimulationWrapper)
self.ui.actionImportToModelica.triggered.connect(self.importToModelica)
# help menu
self.ui.actionOpenFMI1SpecCS.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmi-standard.org/assets/releases/FMI_for_CoSimulation_v1.0.1.pdf')))
self.ui.actionOpenFMI1SpecME.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmi-standard.org/assets/releases/FMI_for_ModelExchange_v1.0.1.pdf')))
self.ui.actionOpenFMI2Spec.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/modelica/fmi-standard/releases/download/v2.0.3/FMI-Specification-2.0.3.pdf')))
self.ui.actionOpenTestFMUs.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/modelica/fmi-cross-check/tree/master/fmus')))
self.ui.actionOpenWebsite.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/CATIA-Systems/FMPy')))
self.ui.actionShowReleaseNotes.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmpy.readthedocs.io/en/latest/changelog/')))
# filter menu
self.filterMenu = QMenu()
self.filterMenu.addAction(self.ui.actionFilterInputs)
self.filterMenu.addAction(self.ui.actionFilterOutputs)
self.filterMenu.addAction(self.ui.actionFilterParameters)
self.filterMenu.addAction(self.ui.actionFilterCalculatedParameters)
self.filterMenu.addAction(self.ui.actionFilterIndependentVariables)
self.filterMenu.addAction(self.ui.actionFilterLocalVariables)
self.ui.filterToolButton.setMenu(self.filterMenu)
# status bar
self.statusIconLabel = ClickableLabel(self)
self.statusIconLabel.setStyleSheet("QLabel { margin-left: 5px; }")
self.statusIconLabel.clicked.connect(lambda: self.setCurrentPage(self.ui.logPage))
self.ui.statusBar.addPermanentWidget(self.statusIconLabel)
self.statusTextLabel = ClickableLabel(self)
self.statusTextLabel.setMinimumWidth(10)
self.statusTextLabel.clicked.connect(lambda: self.setCurrentPage(self.ui.logPage))
self.ui.statusBar.addPermanentWidget(self.statusTextLabel)
self.ui.statusBar.addPermanentWidget(QWidget(self), 1) # spacer
self.simulationProgressBar = QProgressBar(self)
self.simulationProgressBar.setFixedHeight(18)
self.ui.statusBar.addPermanentWidget(self.simulationProgressBar)
self.simulationProgressBar.setVisible(False)
# connect signals and slots
self.ui.actionNewWindow.triggered.connect(self.newWindow)
self.ui.openButton.clicked.connect(self.open)
self.ui.actionOpen.triggered.connect(self.open)
self.ui.actionSaveResult.triggered.connect(self.saveResult)
self.ui.actionSavePlottedResult.triggered.connect(lambda: self.saveResult(plotted=True))
self.ui.actionSimulate.triggered.connect(self.startSimulation)
self.ui.actionSettings.triggered.connect(lambda: self.setCurrentPage(self.ui.settingsPage))
self.ui.actionShowLog.triggered.connect(lambda: self.setCurrentPage(self.ui.logPage))
self.ui.actionShowResults.triggered.connect(lambda: self.setCurrentPage(self.ui.resultPage))
self.fmiTypeComboBox.currentTextChanged.connect(self.updateSimulationSettings)
self.ui.solverComboBox.currentTextChanged.connect(self.updateSimulationSettings)
self.variableSelected.connect(self.updatePlotLayout)
self.variableDeselected.connect(self.updatePlotLayout)
self.tableModel.variableSelected.connect(self.selectVariable)
self.tableModel.variableDeselected.connect(self.deselectVariable)
self.treeModel.variableSelected.connect(self.selectVariable)
self.treeModel.variableDeselected.connect(self.deselectVariable)
self.ui.filterLineEdit.textChanged.connect(self.treeFilterModel.setFilterFixedString)
self.ui.filterLineEdit.textChanged.connect(self.tableFilterModel.setFilterFixedString)
self.ui.filterToolButton.toggled.connect(self.treeFilterModel.setFilterByCausality)
self.ui.filterToolButton.toggled.connect(self.tableFilterModel.setFilterByCausality)
self.log.currentMessageChanged.connect(self.setStatusMessage)
self.ui.selectInputButton.clicked.connect(self.selectInputFile)
self.ui.actionShowAboutDialog.triggered.connect(self.showAboutDialog)
if os.name == 'nt':
self.ui.actionCreateDesktopShortcut.triggered.connect(self.createDesktopShortcut)
self.ui.actionAddFileAssociation.triggered.connect(self.addFileAssociation)
else:
self.ui.actionCreateDesktopShortcut.setEnabled(False)
self.ui.actionAddFileAssociation.setEnabled(False)
self.ui.tableViewToolButton.toggled.connect(lambda show: self.ui.variablesStackedWidget.setCurrentWidget(self.ui.tablePage if show else self.ui.treePage))
for model in [self.treeFilterModel, self.tableFilterModel]:
self.ui.actionFilterInputs.triggered.connect(model.setFilterInputs)
self.ui.actionFilterOutputs.triggered.connect(model.setFilterOutputs)
self.ui.actionFilterParameters.triggered.connect(model.setFilterParameters)
self.ui.actionFilterCalculatedParameters.triggered.connect(model.setFilterCalculatedParameters)
self.ui.actionFilterIndependentVariables.triggered.connect(model.setFilterIndependentVariables)
self.ui.actionFilterLocalVariables.triggered.connect(model.setFilterLocalVariables)
self.ui.treeView.customContextMenuRequested.connect(self.showContextMenu)
self.ui.tableView.customContextMenuRequested.connect(self.showContextMenu)
def newWindow(self):
window = MainWindow()
window.show()
def show(self):
super(MainWindow, self).show()
self.move(self.frameGeometry().topLeft() + self.windowOffset)
self.windowOffset += QPoint(20, 20)
def showContextMenu(self, point):
""" Update and show the variables context menu """
from .TableDialog import TableDialog
if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:
currentView = self.ui.treeView
else:
currentView = self.ui.tableView
self.actionExpandAll.setEnabled(currentView == self.ui.treeView)
self.actionCollapseAll.setEnabled(currentView == self.ui.treeView)
selected = self.getSelectedVariables()
self.actionEditTable.setEnabled(len(selected) == 1 and TableDialog.canEdit(selected[0]))
can_copy = len(selected) > 0
self.actionCopyVariableName.setEnabled(can_copy)
self.actionCopyValueReference.setEnabled(can_copy)
self.contextMenu.exec_(currentView.mapToGlobal(point))
def load(self, filename):
import zipfile
if not self.isVisible():
self.show()
try:
self.modelDescription = md = read_model_description(filename)
except Exception as e:
QMessageBox.warning(self, "Failed to load FMU", "Failed to load %s. %s" % (filename, e))
return
# show model.png
try:
pixmap = QPixmap()
# load the model.png
with zipfile.ZipFile(filename, 'r') as zf:
pixmap.loadFromData(zf.read('model.png'), format='PNG')
# show the unscaled version in tooltip
buffer = QBuffer()
buffer.open(QIODevice.WriteOnly)
pixmap.save(buffer, "PNG", quality=100)
image = bytes(buffer.data().toBase64()).decode()
html = '<img src="data:image/png;base64,{}">'.format(image)
self.ui.modelImageLabel.setToolTip(html)
# show a scaled preview in "Model Info"
pixmap = pixmap.scaled(200, 200, Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.ui.modelImageLabel.setPixmap(pixmap)
        except Exception:
self.ui.modelImageLabel.setPixmap(QPixmap())
self.ui.modelImageLabel.setToolTip(None)
self.filename = filename
platforms = supported_platforms(self.filename)
self.variables.clear()
self.selectedVariables.clear()
self.startValues.clear()
for v in md.modelVariables:
self.variables[v.name] = v
if v.causality == 'output' and not v.dimensions:
self.selectedVariables.add(v)
fmi_types = []
if md.coSimulation:
fmi_types.append('Co-Simulation')
if md.modelExchange:
fmi_types.append('Model Exchange')
experiment = md.defaultExperiment
# toolbar
if experiment is not None and experiment.stopTime is not None:
self.stopTimeLineEdit.setText(str(experiment.stopTime))
# actions
self.ui.actionValidateFMU.setEnabled(True)
can_compile = md.fmiVersion != '1.0' and 'c-code' in platforms
self.ui.actionCompileDarwinBinary.setEnabled(can_compile and fmpy.system == 'darwin')
self.ui.actionCompileLinuxBinary.setEnabled(can_compile and fmpy.system in ['linux', 'windows'])
self.ui.actionCompileWin32Binary.setEnabled(can_compile and fmpy.system == 'windows')
self.ui.actionCompileWin64Binary.setEnabled(can_compile and fmpy.system == 'windows')
self.ui.actionCreateCMakeProject.setEnabled(can_compile)
self.ui.actionCreateJupyterNotebook.setEnabled(True)
self.ui.actionAddWindows32Remoting.setEnabled(md.fmiVersion == '2.0' and 'win32' in platforms and 'win64' not in platforms)
self.ui.actionAddLinux64Remoting.setEnabled(md.fmiVersion == '2.0' and 'win64' in platforms and 'linux64' not in platforms)
can_add_cswrapper = md.fmiVersion == '2.0' and md.coSimulation is None and md.modelExchange is not None
self.ui.actionAddCoSimulationWrapper.setEnabled(can_add_cswrapper)
# variables view
self.treeModel.setModelDescription(md)
self.tableModel.setModelDescription(md)
self.treeFilterModel.invalidate()
self.tableFilterModel.invalidate()
self.ui.treeView.reset()
self.ui.tableView.reset()
# settings page
self.ui.fmiVersionLabel.setText(md.fmiVersion)
self.ui.fmiTypeLabel.setText(', '.join(fmi_types))
self.ui.platformsLabel.setText(', '.join(platforms))
self.ui.modelNameLabel.setText(md.modelName)
self.ui.descriptionLabel.setText(md.description)
self.ui.numberOfContinuousStatesLabel.setText(str(md.numberOfContinuousStates))
self.ui.numberOfEventIndicatorsLabel.setText(str(md.numberOfEventIndicators))
self.ui.numberOfVariablesLabel.setText(str(len(md.modelVariables)))
self.ui.generationToolLabel.setText(md.generationTool)
self.ui.generationDateAndTimeLabel.setText(md.generationDateAndTime)
# relative tolerance
if experiment is not None and experiment.tolerance is not None:
relative_tolerance = experiment.tolerance
else:
relative_tolerance = 1e-6
self.ui.relativeToleranceLineEdit.setText(str(relative_tolerance))
# output interval
if experiment is not None and experiment.stepSize is not None:
output_interval = float(experiment.stepSize)
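            # guard against unreasonably large default step sizes by halving them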
while output_interval > 1000:
output_interval *= 0.5
else:
output_interval = float(self.stopTimeLineEdit.text()) / 500
self.ui.outputIntervalLineEdit.setText(str(output_interval))
self.fmiTypeComboBox.clear()
self.fmiTypeComboBox.addItems(fmi_types)
self.updateSimulationSettings()
self.setCurrentPage(self.ui.settingsPage)
self.ui.dockWidget.show()
self.ui.actionReload.setEnabled(True)
self.ui.actionSettings.setEnabled(True)
self.ui.actionShowLog.setEnabled(True)
self.ui.actionShowResults.setEnabled(False)
can_sim, _ = can_simulate(platforms)
self.ui.actionLoadStartValues.setEnabled(can_sim)
self.ui.actionSimulate.setEnabled(can_sim)
self.stopTimeLineEdit.setEnabled(can_sim)
self.fmiTypeComboBox.setEnabled(can_sim and len(fmi_types) > 1)
self.ui.settingsGroupBox.setEnabled(can_sim)
settings = QSettings()
recent_files = settings.value("recentFiles", defaultValue=[])
recent_files = self.removeDuplicates([filename] + recent_files)
# save the 10 most recent files
settings.setValue('recentFiles', recent_files[:10])
self.setWindowTitle("%s - FMPy" % os.path.normpath(filename))
self.createGraphics()
def open(self):
start_dir = QDir.homePath()
settings = QSettings()
recent_files = settings.value("recentFiles", defaultValue=[])
for filename in recent_files:
dirname = os.path.dirname(filename)
if os.path.isdir(dirname):
start_dir = dirname
break
filename, _ = QFileDialog.getOpenFileName(parent=self,
caption="Open File",
directory=start_dir,
filter="FMUs (*.fmu);;All Files (*.*)")
if filename:
self.load(filename)
def setCurrentPage(self, widget):
""" Set the current page and the actions """
# block the signals during the update
self.ui.actionSettings.blockSignals(True)
self.ui.actionShowLog.blockSignals(True)
self.ui.actionShowResults.blockSignals(True)
self.ui.stackedWidget.setCurrentWidget(widget)
# toggle the actions
self.ui.actionSettings.setChecked(widget == self.ui.settingsPage)
self.ui.actionShowLog.setChecked(widget == self.ui.logPage)
self.ui.actionShowResults.setChecked(widget == self.ui.resultPage)
# un-block the signals during the update
self.ui.actionSettings.blockSignals(False)
self.ui.actionShowLog.blockSignals(False)
self.ui.actionShowResults.blockSignals(False)
def selectInputFile(self):
start_dir = os.path.dirname(self.filename)
filename, _ = QFileDialog.getOpenFileName(parent=self,
caption="Select Input File",
directory=start_dir,
filter="FMUs (*.csv);;All Files (*.*)")
if filename:
self.ui.inputFilenameLineEdit.setText(filename)
def createInputFile(self):
""" Create an input file based on the input variables in the model description """
input_variables = []
for variable in self.modelDescription.modelVariables:
if variable.causality == 'input':
input_variables.append(variable)
if len(input_variables) == 0:
QMessageBox.warning(self,
"Cannot create input file",
"The input file cannot be created because the model has no input variables")
return
filename, _ = os.path.splitext(self.filename)
filename, _ = QFileDialog.getSaveFileName(parent=self,
caption="Save Input File",
directory=filename + '_in.csv',
filter="Comma Separated Values (*.csv);;All Files (*.*)")
if not filename:
return
with open(filename, 'w') as f:
# column names
f.write('"time"')
for variable in input_variables:
f.write(',"%s"' % variable.name)
f.write('\n')
# example data
f.write(','.join(['0'] * (len(input_variables) + 1)) + '\n')
self.ui.inputFilenameLineEdit.setText(filename)
def showInputFileInExplorer(self):
""" Reveal the input file in the file browser """
filename = self.ui.inputFilenameLineEdit.text()
if not os.path.isfile(filename):
QMessageBox.warning(self, "Cannot show input file", "The input file does not exist")
return
QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(filename)))
def openInputFile(self):
""" Open the input file in the default application """
filename = self.ui.inputFilenameLineEdit.text()
if not os.path.isfile(filename):
QMessageBox.warning(self, "Cannot open input file", "The input file does not exist")
return
QDesktopServices.openUrl(QUrl.fromLocalFile(filename))
def updateSimulationSettings(self):
if self.fmiTypeComboBox.currentText() == 'Co-Simulation':
self.ui.solverComboBox.setEnabled(False)
self.ui.stepSizeLineEdit.setEnabled(False)
self.ui.relativeToleranceLineEdit.setEnabled(True)
else:
self.ui.solverComboBox.setEnabled(True)
fixed_step = self.ui.solverComboBox.currentText() == 'Fixed-step'
self.ui.stepSizeLineEdit.setEnabled(fixed_step)
self.ui.relativeToleranceLineEdit.setEnabled(not fixed_step)
def selectVariable(self, variable):
self.selectedVariables.add(variable)
self.variableSelected.emit(variable)
def deselectVariable(self, variable):
self.selectedVariables.remove(variable)
self.variableDeselected.emit(variable)
def startSimulation(self):
from fmpy.gui.simulation import SimulationThread
try:
stop_time = float(self.stopTimeLineEdit.text())
step_size = float(self.ui.stepSizeLineEdit.text())
relative_tolerance = float(self.ui.relativeToleranceLineEdit.text())
if self.ui.outputIntervalRadioButton.isChecked():
output_interval = float(self.ui.outputIntervalLineEdit.text())
else:
max_samples = float(self.ui.maxSamplesLineEdit.text())
output_interval = stop_time / max_samples
except Exception as ex:
self.log.log('error', "Failed to start simulation: %s" % ex)
self.ui.stackedWidget.setCurrentWidget(self.ui.logPage)
return
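        # never let the solver step size exceed the output interval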
step_size = min(step_size, output_interval)
if self.ui.solverComboBox.currentText() == 'Fixed-step':
solver = 'Euler'
else:
solver = 'CVode'
if self.ui.inputCheckBox.isChecked():
input_variables = []
for variable in self.modelDescription.modelVariables:
if variable.causality == 'input':
input_variables.append(variable.name)
try:
from fmpy.util import read_csv
filename = self.ui.inputFilenameLineEdit.text()
input = read_csv(filename, variable_names=input_variables)
except Exception as e:
self.log.log('error', "Failed to load input from '%s'. %s" % (filename, e))
return
else:
input = None
output = []
for variable in self.modelDescription.modelVariables:
output.append(variable.name)
fmi_type = 'CoSimulation' if self.fmiTypeComboBox.currentText() == 'Co-Simulation' else 'ModelExchange'
self.simulationThread = SimulationThread(filename=self.filename,
fmiType=fmi_type,
stopTime=stop_time,
solver=solver,
stepSize=step_size,
relativeTolerance=relative_tolerance,
outputInterval=output_interval,
startValues=self.startValues,
applyDefaultStartValues=self.ui.applyDefaultStartValuesCheckBox.isChecked(),
input=input,
output=output,
debugLogging=self.ui.debugLoggingCheckBox.isChecked(),
fmiLogging=self.ui.logFMICallsCheckBox.isChecked())
self.ui.actionSimulate.setIcon(QIcon(':/icons/stop.png'))
self.ui.actionSimulate.setToolTip("Stop simulation")
self.ui.actionSimulate.triggered.disconnect(self.startSimulation)
self.ui.actionSimulate.triggered.connect(self.simulationThread.stop)
self.simulationProgressBar.setVisible(True)
self.simulationThread.messageChanged.connect(self.log.log)
self.simulationThread.progressChanged.connect(self.simulationProgressBar.setValue)
self.simulationThread.finished.connect(self.simulationFinished)
if self.ui.clearLogOnStartButton.isChecked():
self.log.clear()
self.setCurrentPage(self.ui.resultPage)
self.simulationThread.start()
self.plotUpdateTimer.start(100)
self.updatePlotLayout()
def simulationFinished(self):
# update UI
self.ui.actionSimulate.triggered.disconnect(self.simulationThread.stop)
self.ui.actionSimulate.triggered.connect(self.startSimulation)
self.ui.actionSimulate.setIcon(QIcon(':/icons/play.png'))
self.ui.actionSimulate.setToolTip("Start simulation")
self.plotUpdateTimer.stop()
self.simulationProgressBar.setVisible(False)
self.ui.actionShowResults.setEnabled(True)
self.ui.actionSettings.setEnabled(True)
self.setCurrentPage(self.ui.resultPage)
self.updatePlotLayout()
if self.result is None:
self.setCurrentPage(self.ui.logPage)
else:
self.ui.actionSaveResult.setEnabled(True)
self.ui.actionSavePlottedResult.setEnabled(True)
self.result = self.simulationThread.result
self.simulationThread = None
self.updatePlotData()
def updatePlotData(self):
import numpy as np
if self.simulationThread is not None and len(self.simulationThread.rows) > 1:
# get results from current simulation
self.result = np.array(self.simulationThread.rows, dtype=np.dtype(self.simulationThread.cols))
if self.result is None:
return # no results available yet
time = self.result['time']
for variable, curve in self.curves:
if variable.name not in self.result.dtype.names:
continue
y = self.result[variable.name]
if variable.type == 'Real':
curve.setData(x=time, y=y)
else:
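                # discrete signals: duplicate the samples to draw a step (zero-order hold) curve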
curve.setData(x=np.repeat(time, 2)[1:], y=np.repeat(y, 2)[:-1])
def updatePlotLayout(self):
self.ui.plotWidget.clear()
self.curves[:] = []
if self.simulationThread is not None:
stop_time = self.simulationThread.stopTime
elif self.result is not None:
stop_time = self.result['time'][-1]
else:
stop_time = 1.0
pen = (0, 0, 255)
for variable in self.selectedVariables:
self.ui.plotWidget.nextRow()
plot = self.ui.plotWidget.addPlot()
if variable.type == 'Real':
curve = plot.plot(pen=pen)
else:
if variable.type == 'Boolean':
plot.setYRange(0, 1, padding=0.2)
plot.getAxis('left').setTicks([[(0, 'false'), (1, 'true')], []])
curve = plot.plot(pen=pen, fillLevel=0, fillBrush=(0, 0, 255, 50), antialias=False)
else:
curve = plot.plot(pen=pen, antialias=False)
plot.setXRange(0, stop_time, padding=0.05)
plot.setLabel('left', variable.name)
plot.showGrid(x=True, y=True, alpha=0.25)
# hide the auto-scale button and disable context menu and mouse interaction
plot.hideButtons()
plot.setMouseEnabled(False, False)
plot.setMenuEnabled(False)
self.curves.append((variable, curve))
self.updatePlotData()
def showColumn(self, name, show):
if name in self.showColumnActions:
self.showColumnActions[name].setChecked(show)
i = VariablesModel.COLUMN_NAMES.index(name)
self.ui.treeView.setColumnHidden(i, not show)
self.ui.tableView.setColumnHidden(i, not show)
def showAllColumns(self):
for name in COLLAPSABLE_COLUMNS:
self.showColumn(name, True)
def hideAllColumns(self):
for name in COLLAPSABLE_COLUMNS:
self.showColumn(name, False)
def setStatusMessage(self, level, text):
if level in ['debug', 'info', 'warning', 'error']:
self.statusIconLabel.setPixmap(QPixmap(':/icons/%s-16x16.png' % level))
else:
self.statusIconLabel.setPixmap(QPixmap())
self.statusTextLabel.setText(text)
def dragEnterEvent(self, event):
for url in event.mimeData().urls():
if not url.isLocalFile():
return
event.acceptProposedAction()
def dropEvent(self, event):
urls = event.mimeData().urls()
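        # load the first dropped file into this window and any further files into new windows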
for url in urls:
if url == urls[0]:
window = self
else:
window = MainWindow()
window.load(url.toLocalFile())
def saveResult(self, plotted=False):
filename, _ = os.path.splitext(self.filename)
filename, _ = QFileDialog.getSaveFileName(parent=self,
caption="Save Result",
directory=filename + '_out.csv',
filter="Comma Separated Values (*.csv);;All Files (*.*)")
if filename:
from ..util import write_csv
if plotted:
columns = [variable.name for variable in self.selectedVariables]
else:
columns = None
try:
write_csv(filename=filename, result=self.result, columns=columns)
except Exception as e:
QMessageBox.critical(self, "Failed to write result", '"Failed to write "%s". %s' % (filename, e))
def createDesktopShortcut(self):
""" Create a desktop shortcut to start the GUI """
import os
from win32com.client import Dispatch
import sys
env = os.environ.get('CONDA_DEFAULT_ENV')
if env is None:
target_path = sys.executable
root, ext = os.path.splitext(target_path)
pythonw = root + 'w' + ext
if os.path.isfile(pythonw):
target_path = pythonw
arguments = '-m fmpy.gui'
else:
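            # inside a conda environment: find activate.bat on PATH and launch via cmd.exe
            # so the environment is activated before starting the GUI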
for path in os.environ["PATH"].split(os.pathsep):
activate = os.path.join(path, 'activate.bat')
if os.path.isfile(activate):
break
target_path = r'%windir%\System32\cmd.exe'
arguments = '/C ""%s" %s && python -m fmpy.gui"' % (activate, env)
file_path = os.path.dirname(__file__)
icon = os.path.join(file_path, 'icons', 'app_icon.ico')
desktop_locations = QStandardPaths.standardLocations(QStandardPaths.DesktopLocation)
shortcut_path = os.path.join(desktop_locations[0], "FMPy GUI.lnk")
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(shortcut_path)
shortcut.Targetpath = target_path
shortcut.Arguments = arguments
# shortcut.WorkingDirectory = ...
shortcut.IconLocation = icon
shortcut.save()
def showAboutDialog(self):
dialog = AboutDialog(self)
dialog.show()
@staticmethod
def removeDuplicates(seq):
""" Remove duplicates from a sequence """
seen = set()
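        # bind the add method once to avoid repeated attribute lookups in the comprehension below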
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def validateFMU(self):
from ..validation import validate_fmu
problems = validate_fmu(self.filename)
if problems:
button = QMessageBox.question(self, "Validation failed", "%d problems have been found. Save validation messages?" % len(problems))
if button == QMessageBox.Yes:
filename, _ = os.path.splitext(self.filename)
filename, _ = QFileDialog.getSaveFileName(parent=self,
caption="Save validation messages",
directory=filename + '_validation.txt',
filter="Text Files (*.txt);;All Files (*.*)")
if filename:
with open(filename, 'w') as f:
f.writelines(problems)
else:
QMessageBox.information(self, "Validation successful", "No problems have been found.")
def addFileAssociation(self):
""" Associate *.fmu with the FMPy GUI """
try:
from winreg import HKEY_CURRENT_USER, KEY_WRITE, REG_SZ, OpenKey, CreateKey, SetValueEx, CloseKey
            env = os.environ.get('CONDA_DEFAULT_ENV')
if env is None:
python = sys.executable
root, ext = os.path.splitext(python)
pythonw = root + 'w' + ext
if os.path.isfile(pythonw):
python = pythonw
target = '"%s" -m fmpy.gui "%%1"' % python
else:
# activate the conda environment
for path in os.environ["PATH"].split(os.pathsep):
activate = os.path.join(path, 'activate.bat')
if os.path.isfile(activate):
break
windir = os.environ['WINDIR']
cmd = os.path.join(windir, 'System32', 'cmd.exe')
target = r'%s /C ""%s" %s && python -m fmpy.gui %%1"' % (cmd, activate, env)
key_path = r'Software\Classes\fmpy.gui\shell\open\command'
CreateKey(HKEY_CURRENT_USER, key_path)
key = OpenKey(HKEY_CURRENT_USER, key_path, 0, KEY_WRITE)
SetValueEx(key, '', 0, REG_SZ, target)
CloseKey(key)
key_path = r'SOFTWARE\Classes\.fmu'
CreateKey(HKEY_CURRENT_USER, key_path)
key = OpenKey(HKEY_CURRENT_USER, key_path, 0, KEY_WRITE)
SetValueEx(key, '', 0, REG_SZ, 'fmpy.gui')
CloseKey(key)
QMessageBox.information(self, "File association added", "The file association for *.fmu has been added")
except Exception as e:
QMessageBox.critical(self, "File association failed", "The file association for *.fmu could not be added. %s" % e)
def copyValueReference(self):
""" Copy the value references of the selected variables to the clipboard """
text = '\n'.join([str(v.valueReference) for v in self.getSelectedVariables()])
QApplication.clipboard().setText(text)
def copyVariableName(self):
""" Copy the names of the selected variables to the clipboard """
text = '\n'.join([str(v.name) for v in self.getSelectedVariables()])
QApplication.clipboard().setText(text)
def getSelectedVariables(self):
""" Returns a list of selected variables in the current view """
variables = []
if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:
for index in self.ui.treeView.selectionModel().selectedRows():
sourceIndex = self.treeFilterModel.mapToSource(index)
treeItem = sourceIndex.internalPointer()
if treeItem.variable is not None:
variables.append(treeItem.variable)
else:
for index in self.ui.tableView.selectionModel().selectedRows():
sourceIndex = self.tableFilterModel.mapToSource(index)
variable = sourceIndex.internalPointer()
variables.append(variable)
return variables
def clearPlots(self):
""" Clear all plots """
self.selectedVariables.clear()
self.updatePlotLayout()
def createGraphics(self):
""" Create the graphical representation of the FMU's inputs and outputs """
def variableColor(variable):
if variable.type.startswith(('Float', 'Real')):
return QColor.fromRgb(26, 77, 179)
elif variable.type.startswith(('Enumeration', 'Int', 'UInt')):
return QColor.fromRgb(179, 77, 26)
elif variable.type == 'Boolean':
return QColor.fromRgb(255, 0, 255)
elif variable.type == 'String':
return QColor.fromRgb(26, 114, 16)
elif variable.type == 'Binary':
return QColor.fromRgb(81, 81, 81)
else:
return QColor.fromRgb(0, 0, 0)
inputVariables = []
outputVariables = []
maxInputLabelWidth = 0
maxOutputLabelWidth = 0
textItem = QGraphicsTextItem()
fontMetrics = QFontMetricsF(textItem.font())
for variable in self.modelDescription.modelVariables:
if variable.causality == 'input':
inputVariables.append(variable)
elif variable.causality == 'output':
outputVariables.append(variable)
for variable in inputVariables:
maxInputLabelWidth = max(maxInputLabelWidth, fontMetrics.width(variable.name))
for variable in outputVariables:
maxOutputLabelWidth = max(maxOutputLabelWidth, fontMetrics.width(variable.name))
from math import floor
scene = QGraphicsScene()
self.ui.graphicsView.setScene(scene)
group = QGraphicsItemGroup()
scene.addItem(group)
group.setPos(200.5, -50.5)
lh = 15 # line height
w = max(150., maxInputLabelWidth + maxOutputLabelWidth + 20)
h = max(50., 10 + lh * max(len(inputVariables), len(outputVariables)))
block = QGraphicsRectItem(0, 0, w, h, group)
block.setPen(QColor.fromRgb(0, 0, 0))
pen = QPen()
pen.setWidthF(1)
font = QFont()
font.setPixelSize(10)
# inputs
y = floor((h - len(inputVariables) * lh) / 2 - 2)
for variable in inputVariables:
text = QGraphicsTextItem(variable.name, group)
text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))
text.setFont(font)
text.setX(3)
text.setY(y)
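            # small triangle on the left edge of the block marking the input connector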
polygon = QPolygonF([QPointF(-8, y + 7.5), QPointF(-1, y + 11), QPointF(-8, y + 14.5)])
path = QPainterPath()
path.addPolygon(polygon)
path.closeSubpath()
contour = QGraphicsPathItem(path, group)
contour.setPen(QPen(Qt.NoPen))
contour.setBrush(variableColor(variable))
pen = QPen()
pen.setColor(variableColor(variable))
pen.setJoinStyle(Qt.MiterJoin)
contour.setPen(pen)
y += lh
# outputs
y = floor((h - len(outputVariables) * lh) / 2 - 2)
for variable in outputVariables:
text = QGraphicsTextItem(variable.name, group)
text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))
text.setFont(font)
text.setX(w - 3 - text.boundingRect().width())
text.setY(y)
polygon = QPolygonF([QPointF(w + 1, y + 7.5), QPointF(w + 8, y + 11), QPointF(w + 1, y + 14.5)])
path = QPainterPath()
path.addPolygon(polygon)
path.closeSubpath()
contour = QGraphicsPathItem(path, group)
contour.setPen(QPen(Qt.NoPen))
contour.setBrush(variableColor(variable))
pen = QPen()
pen.setColor(variableColor(variable))
pen.setJoinStyle(Qt.MiterJoin)
contour.setPen(pen)
y += lh
def saveChanges(self):
from ..util import change_fmu
output_file, _ = QFileDialog.getSaveFileName(parent=self,
caption='Save Changed FMU',
directory=self.filename,
filter='FMUs (*.fmu)')
if output_file:
change_fmu(input_file=self.filename, output_file=output_file, start_values=self.startValues)
def loadStartValues(self):
from ..util import get_start_values
start_values = get_start_values(self.filename)
self.startValues.update(start_values)
self.ui.treeView.reset()
self.ui.tableView.reset()
def editTable(self):
""" Open the table dialog """
from .TableDialog import TableDialog
variables = self.getSelectedVariables()
if len(variables) == 1:
start_values = self.startValues.copy()
dialog = TableDialog(modelVariables=self.modelDescription.modelVariables,
variable=variables[0],
startValues=start_values)
if dialog.exec_() == QDialog.Accepted:
self.startValues.clear()
self.startValues.update(start_values)
def compilePlatformBinary(self, target_platform):
""" Compile the platform binary """
from ..util import compile_platform_binary
platforms = supported_platforms(self.filename)
if target_platform in platforms:
button = QMessageBox.question(self, "Platform binary already exists",
f'The FMU already contains a binary for the platform "{target_platform}".'
' Do you want to compile and overwrite the existing binary?')
if button == QMessageBox.No:
return
if self.modelDescription.fmiVersion == '3.0':
platform_map = {
'darwin64': 'x86_64-darwin',
'linux64': 'x86_64-linux',
'win32': 'x86-windows',
'win64': 'x86_64-windows',
}
target_platform = platform_map[target_platform]
try:
compile_platform_binary(self.filename, target_platform=target_platform)
except Exception as e:
QMessageBox.critical(self, "Failed to compile platform binaries", str(e))
return
self.load(self.filename)
def createJupyterNotebook(self):
""" Create a Juypyter Notebook to simulate the FMU """
from fmpy.util import create_jupyter_notebook
filename, ext = os.path.splitext(self.filename)
filename, _ = QFileDialog.getSaveFileName(
parent=self,
directory=filename + '.ipynb',
filter='Jupyter Notebooks (*.ipynb);;All Files (*)'
)
if filename:
try:
create_jupyter_notebook(self.filename, filename)
except Exception as e:
QMessageBox.critical(self, "Failed to create Jupyter Notebook", str(e))
return
if QMessageBox.question(self, "Open Jupyter Notebook?", f"Start Jupyter and open {filename}?") == QMessageBox.Yes:
from subprocess import run, CREATE_NEW_CONSOLE
try:
run(['jupyter', 'notebook', filename], creationflags=CREATE_NEW_CONSOLE)
except Exception as e:
QMessageBox.critical(self, "Failed to start Jupyter", str(e))
def createCMakeProject(self):
""" Create a CMake project from a C code FMU """
from fmpy.util import create_cmake_project
project_dir = QFileDialog.getExistingDirectory(
parent=self,
caption='Select CMake Project Folder',
directory=os.path.dirname(self.filename))
if project_dir:
create_cmake_project(self.filename, project_dir)
def addRemotingBinaries(self, host_platform, remote_platform):
from ..util import add_remoting
try:
add_remoting(self.filename, host_platform, remote_platform)
except Exception as e:
QMessageBox.warning(self, "Failed to add Remoting Binaries",
f"Failed to add remoting binaries to {self.filename}. {e}")
self.load(self.filename)
def addCoSimulationWrapper(self):
""" Add the Co-Simulation Wrapper to the FMU """
from ..cswrapper import add_cswrapper
try:
add_cswrapper(self.filename)
except Exception as e:
QMessageBox.warning(self, "Failed to add Co-Simulation Wrapper",
"Failed to add Co-Simulation Wrapper %s. %s" % (self.filename, e))
self.load(self.filename)
def importToModelica(self):
from os.path import dirname, join
from ..modelica import import_fmu_to_modelica
interface_type = self.fmiTypeComboBox.currentText()
if interface_type == 'Co-Simulation':
model_identifier = self.modelDescription.coSimulation.modelIdentifier
else:
model_identifier = self.modelDescription.modelExchange.modelIdentifier
filename, _ = QFileDialog.getSaveFileName(self,
caption="Save Modelica Model",
directory=join(dirname(self.filename), model_identifier + '.mo'),
filter='Modelica Model (*.mo)')
if filename:
try:
import_fmu_to_modelica(fmu_path=self.filename, model_path=filename, interface_type=interface_type)
except Exception as e:
QMessageBox.critical(self, "Failed create Modelica model", str(e)) | PypiClean |
/Eskapade_Spark-1.0.0-py3-none-any.whl/eskapadespark/tutorials/esk606_convert_spark_df.py
import pyspark
from eskapade import process_manager, ConfigObject, DataStore, Chain
from eskapade.logger import Logger
from eskapadespark import SparkManager, SparkDfConverter
logger = Logger()
logger.debug('Now parsing configuration file esk606_convert_spark_df.')
##########################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk606_convert_spark_df'
settings['version'] = 0
##########################################################################
# --- start Spark session
spark = process_manager.service(SparkManager).create_session(eskapade_settings=settings)
##########################################################################
# --- input data
ds = process_manager.service(DataStore)
rows = [(it, 'foo{:d}'.format(it), (it + 1) / 2.) for it in range(100)]
ds['df'] = spark.createDataFrame(rows, schema=['index', 'foo', 'bar'])
schema = ds['df'].schema
##########################################################################
# --- now set up the chains and links based on configuration flags
# define function to set number of partitions
def set_num_parts(df, max_num_parts):
"""Set number of partitions."""
if df.rdd.getNumPartitions() > max_num_parts:
df = df.repartition(max_num_parts)
return df
# define function to select rows from list
def filter_list(list_data, min_index):
"""Filter list by index."""
return list(filter(lambda r: r[0] >= min_index, list_data))
# define function to select rows in a pandas data frame
def filter_pd(pd_data, min_index):
"""Filter pandas dataframe by index."""
return pd_data[pd_data['index'] >= min_index]
# post-conversion process functions
process_methods = {'df': ['filter', set_num_parts],
'rdd': [pyspark.rdd.PipelinedRDD.filter, 'coalesce'],
'list': [filter_list],
'pd': [filter_pd]}
process_meth_args = {'df': {'filter': ('index > 19',)},
'rdd': {pyspark.rdd.PipelinedRDD.filter: (lambda r: r[0] > 19,), 'coalesce': (2,)},
'list': {},
'pd': {}}
process_meth_kwargs = {'df': {set_num_parts: dict(max_num_parts=2)},
'rdd': {},
'list': {filter_list: dict(min_index=20)},
'pd': {filter_pd: dict(min_index=20)}}
# create chain and data-frame-creator links
chain = Chain('Create')
for out_format in process_methods:
# create data-frame-conversion link
lnk = SparkDfConverter(name='df_to_{}_converter'.format(out_format),
read_key='df',
store_key='{}_output'.format(out_format),
schema_key='{}_schema'.format(out_format),
output_format=out_format,
preserve_col_names=False,
process_methods=process_methods[out_format],
process_meth_args=process_meth_args[out_format],
process_meth_kwargs=process_meth_kwargs[out_format])
# add link to chain
chain.add(lnk)
##########################################################################
logger.debug('Done parsing configuration file esk606_convert_spark_df.')
/Gletscher-0.0.1.tar.gz/Gletscher-0.0.1/gletscher/config.py
import logging
import os
import uuid
import configparser
from Crypto import Random
from gletscher.crypto import Crypter
from gletscher import hex
class BackupConfiguration(object):
@staticmethod
def _Prompt(prompt, verifier):
while True:
line = input("%s: " % prompt)
try:
if verifier(line.strip()):
return line.strip()
except:
pass
@staticmethod
def LoadFromFile(config_dir):
config = configparser.RawConfigParser()
config.read(os.path.join(config_dir, "backup.config"))
backup_config = BackupConfiguration(config_dir, config)
assert os.path.isdir(backup_config.catalog_dir_location()), \
"catalog directory does not exist"
assert os.path.isdir(backup_config.tmp_dir_location()), \
"tmp directory does not exist"
logging.basicConfig(
level=logging.DEBUG,
filename=backup_config.log_file_location(),
datefmt="%Y-%m-%d %H:%M:%S",
format="%(asctime)s %(levelname)s "
"%(name)s#%(funcName)s: %(message)s")
return backup_config
@staticmethod
def NewEmptyConfiguration(config_dir, prompt_command=None):
if not prompt_command:
prompt_command = BackupConfiguration._Prompt
id = uuid.uuid4()
secret_key = Random.get_random_bytes(32)
crypter = Crypter(secret_key)
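        # hash the uuid with the secret key; this signature is verified again when the config is loaded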
signature = crypter.hash(id.bytes)
min_config = "\n".join([
"# gletscher configuration",
"",
"[id]",
"uuid = %s" % str(id),
"key = %s" % hex.b2h(secret_key),
"signature = %s" % hex.b2h(signature),
"",
"[aws]",
"region = %s" % prompt_command(
"AWS Region", verifier=lambda x: len(x) > 0),
"account_id = %d" % int(prompt_command(
"AWS Account ID", verifier=lambda x: int(x) > 0)),
"access_key = %s" % prompt_command(
"AWS Access Key", verifier=lambda x: len(x) > 0),
"secret_access_key = %s" % prompt_command(
"AWS Secret Access Key", verifier=lambda x: len(x) > 0),
"",
"[glacier]",
"vault_name = %s" % prompt_command(
"Glacier Vault Name", verifier=lambda x: len(x) > 0),
"",
"[dirs]",
"catalogs = catalogs",
"tmp = tmp",
"",
])
for dir in ("catalogs", "tmp"):
os.mkdir(os.path.join(config_dir, dir))
with open(os.path.join(config_dir, "backup.config"), "w") as f:
f.write(min_config)
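        # re-read the file we just wrote so the new config goes through the same validation as an existing one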
return BackupConfiguration.LoadFromFile(config_dir)
def __init__(self, config_dir, config):
self._config_dir = config_dir
self._config = config
crypter = Crypter(self.secret_key())
signature = crypter.hash(self.uuid().bytes)
assert signature == hex.h2b(self._config.get("id", "signature")), \
"calculated signature does not match signature in config file"
def secret_key(self):
key = hex.h2b(self._config.get("id", "key"))
assert len(key) == 32
return key
def config_dir_location(self):
return self._config_dir
def index_file_location(self):
return os.path.join(self._config_dir, "index.gdbm")
def global_catalog_location(self):
return os.path.join(self._config_dir, "global.catalog")
def tmp_dir_location(self):
return os.path.join(
self._config_dir, self._config.get("dirs", "tmp"))
def catalog_dir_location(self):
return os.path.join(
self._config_dir, self._config.get("dirs", "catalogs"))
def catalog_location(self, catalog):
return os.path.join(self.catalog_dir_location(), "%s.catalog" % catalog)
def uuid(self):
return uuid.UUID(self._config.get("id", "uuid"))
def aws_region(self):
return self._config.get("aws", "region")
def aws_account_id(self):
return self._config.getint("aws", "account_id")
def vault_name(self):
return self._config.get("glacier", "vault_name")
def aws_access_key(self):
return self._config.get("aws", "access_key")
def aws_secret_access_key(self):
return self._config.get("aws", "secret_access_key")
def log_file_location(self):
return os.path.join(self._config_dir, "log.txt") | PypiClean |
/Babel-Thrive-1.11.tar.gz/Babel-Thrive-1.11/json_extractor.py
from collections import deque
from babel.messages.jslexer import tokenize, unquote_string
# This helper is copied to all necessary files because once again python imports are being a pain
def reopen_normal_read(file_obj, encoding):
"""Re-open a file obj in plain read mode"""
return open(file_obj.name, "r", encoding=encoding)
JSON_GETTEXT_KEYWORD = 'type'
JSON_GETTEXT_VALUE = 'gettext_string'
JSON_GETTEXT_KEY_CONTENT = 'content'
JSON_GETTEXT_KEY_ALT_CONTENT = 'alt_content'
JSON_GETTEXT_KEY_FUNCNAME = 'funcname'
class JsonExtractor(object):
def __init__(self, data):
self.state = 'start'
self.data = data
self.token_to_add = None
self.is_value = False
self.gettext_mode = False
self.current_key = None
self.in_array = False
self.nested_in_array = []
self.results = []
self.token_params = {}
# TODO: fix the duplicate name between this and the other add_result
def add_result(self, token):
value = unquote_string(token.value)
if value not in self.results:
self.results[value] = deque()
self.results[value].append(token.lineno)
def start_object(self):
self.gettext_mode = False
self.state = 'key'
# Things will be incorrect if an object is contained in an array, so
# we use a stack of states to return to like this in order to support
# that kind of JSON structures
self.nested_in_array.append(self.in_array)
self.in_array = False
def with_separator(self, token):
self.state = 'value'
def start_array(self):
self.in_array = True
def end_array(self):
self.in_array = False
self.end_pair()
def end_pair(self, add_gettext_object=False):
if self.token_to_add:
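            # in gettext mode, only flush the pending token once the enclosing object ends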
if not self.gettext_mode or (self.gettext_mode and add_gettext_object):
self.add_result(self.token_to_add)
if not self.in_array:
self.current_key = None
self.state = 'key'
def end_object(self):
self.end_pair(add_gettext_object=True)
self.gettext_mode = False
self.state = 'end'
self.in_array = self.nested_in_array.pop()
def add_result(self, token):
value = unquote_string(token.value)
result = dict(
line_number=token.lineno,
content=value
)
for key, value in self.token_params.items():
if key == 'alt_token':
result['alt_content'] = unquote_string(value.value)
result['alt_line_number'] = value.lineno
else:
result[key] = unquote_string(value)
self.results.append(result)
self.token_to_add = None
self.token_params = {}
def get_lines_data(self):
"""
        Tokenize the JSON data and return a list of dicts, one per extracted
        string, each holding the string content, its line number and any
        collected gettext metadata (funcname, alt_content, ...).
        :rtype: list
"""
for token in tokenize(self.data):
if token.type == 'operator':
if token.value == '{':
self.start_object()
elif token.value == '[':
self.start_array()
elif token.value == ':':
self.with_separator(token)
elif token.value == '}':
self.end_object()
elif token.value == ']':
self.end_array()
elif token.value == ',':
self.end_pair()
elif token.type == 'string':
if self.state == 'key':
self.current_key = unquote_string(token.value)
if self.current_key == JSON_GETTEXT_KEYWORD:
self.gettext_mode = True
else:
# TODO: auto-detecting items to extract through the keywords passed to extract_json would be very nice
if self.current_key.lower() in (
"groupname", "displayname", "name", "message", "messages"):
self.token_to_add = token
return self.results
def extract_json(file_obj, keywords, comment_tags, options):
"""
Supports: gettext, ngettext. See package README or github ( https://github.com/tigrawap/pybabel-json ) for more usage info.
"""
with reopen_normal_read(file_obj, options.get('encoding', 'utf-8')) as f:
data = f.read()
json_extractor = JsonExtractor(data)
strings_data = json_extractor.get_lines_data()
for item in strings_data:
messages = [item['content']]
if item.get('funcname') == 'ngettext':
messages.append(item['alt_content'])
        yield item['line_number'], item.get('funcname', 'gettext'), tuple(messages), []
/BetterScratchAPI-0.1.3.tar.gz/BetterScratchAPI-0.1.3/betterscratchapi/user.py
try:
import requests
except ModuleNotFoundError:
exit("Could not import the requests module!")
import warnings
class user:
# Allow Defining User, Ex. Command: u = user("ScratchToolBox")
def __init__(self, u):
self.user = u
# Get User ID (integer) Ex. 44834297
def id(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['id']
# Get User username (string) Ex. ScratchToolBox
def username(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['username']
# Get if user is Scratch Team (boolean) Ex. False
def is_scratchteam(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['scratchteam']
# Get user's join date (string) Ex. 2019-05-04T17:22:53.000Z
def joined(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['history']['joined']
# Get user's profile image URL (string) You can specify a supported size. Ex. https://cdn2.scratch.mit.edu/get_image/user/44834297_90x90.png?v=
def image(self, size="90x90"):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
if size == "90x90":
return data['profile']['images']['90x90']
elif size == "60x60":
return data['profile']['images']['60x60']
elif size == "55x55":
return data['profile']['images']['55x55']
elif size == "50x50":
return data['profile']['images']['50x50']
elif size == "32x32":
return data['profile']['images']['32x32']
else:
print("\033[1;31;40mBetterScratchAPI Warning: Unsupported image size (" + size + ") given, default size (90x90) was used instead!\033[0;0m")
return data['profile']['images']['90x90']
# Get user's status ["What I'm Working On"] (string)
def status(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['profile']['status']
# Get user's bio ["About Me"] (string)
def bio(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
return data['profile']['bio']
# Get user's country (string) Ex. United States
def country(self):
URL = "https://api.scratch.mit.edu/users/" + self.user
r = requests.get(url = URL)
data = r.json()
        return data['profile']['country']  # country is nested under 'profile' in the users API response
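# A minimal usage sketch (assumes network access to the Scratch API; the
# username is the example one from the comment above):
#     u = user("ScratchToolBox")
#     print(u.username(), u.id(), u.is_scratchteam())
#     print(u.image(size="60x60"))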
/ITI1480A-linux-3.0.tar.gz/ITI1480A-linux-3.0/README.rst
User-space driver and tools for ITI1480A USB analyser.
Home: http://github.com/vpelletier/ITI1480A-linux
Disclaimer
==========
I am not affiliated with International Test Instruments in any way.
ITI kindly accepted that I start reverse-engineering their windows-only
software so I could use the analyser I bought from them with Linux.
I had no privileged access to any documentation of any kind.
Dependencies
============
- Python_ 2.4+ (though mostly tested with 2.7 nowadays) or pypy_ for ~7 times
faster parsing
- libusb_ 1.0
- python-libusb1_
- cycfx2prog_
- udev (should come from your distribution)
- ply_
Firmwares
---------
The ITI1480A analyser is composed of 2 programmable chips:
- A Cypress FX2, to be programmed on first device enumeration after connection
  to the analysis computer. Its firmware transmits commands sent by the analysis
computer to the main analyser chip and streams capture data back.
This chip controls the "Host power" led: if the firmware is properly loaded,
this led must be on.
- An Altera Cyclone 2 FPGA, which produces data which is stored in capture
files by listening to events from the link under test. Its firmware is
loaded at the beginning of each capture session.
This chip controls the "Link power" and "Link activity" leds.
Just for completeness, there is a small flash chip which merely contains
power-on vendor and device identifiers so an unprogrammed ITI1480A FX2 can
still be distinguished from any other device's FX2.
FX2
+++
A free software implementation of this firmware exists, so you have two
options:
- Either using the proprietary firmware from
`1480A USB Protocol Analyzer Software`_, found under the name `ITI1480A.spt`
  in the installation directory.
- Or the provided Free Software firmware, available pre-built or from source. If
you want to build it you need:
- fx2lib_
- sdcc_
Cyclone 2
+++++++++
No free software implementation exists (yet). Contributions welcome.
In the meantime, you need to use the proprietary implementation from
`1480A USB Protocol Analyzer Software`_, found under the name
`ulpitest.rbf` in the installation directory.
`4.1.0b ultitest.rbf`_ (latest version as of this writing) has been released
stand-alone.
Installation
============
Notes: paths and udev group are suitable for at least Debian. You may need to
edit udev/ITI1480A.rules and adapt below paths.
::
python setup.py install
cp udev/ITI1480A.rules /etc/udev/rules.d/
cp udev/ITI1480A.sh /lib/udev/
wget -O /lib/firmware/ITI1480A.rbf http://www.internationaltestinstruments.com/Downloads/UlpiTest.rbf
- ITI's FX2 firmware::
spt2hex /path/to/ITI1480A.spt
cp ITI1480A_1.ihx /lib/firmware/ITI1480A.ihx
spt2hex may generate several files (ITI1480A_0.ihx, ITI1480A_1.ihx, ...), use
the highest-numbered one.
- Free Software FX2 firmware:
To rebuild from source::
FX2LIBDIR=/path_to/fx2lib/ make -C CY7C68013A
To install::
cp CY7C68013A/build/ITI1480A.ihx /lib/firmware/
To test installation, (re)plug your protocol analyser. If the "Host power" led
turns on within a few seconds, your FX2 firmware was successfully installed and
loaded, and your analyser is ready to use.
Usage
=====
To start a capture::
iti1480a-capture > captured.usb
Send signal SIGINT (^C) or SIGTERM to stop the capture; the tool waits for the
analyser to push all remaining data to the host, then exits.
Send signal SIGTSTP (^Z) to pause the analyser, SIGCONT (fg) to resume.
To get a human-friendly text dump of a previous capture::
iti1480a-display -i captured.usb
To watch a running capture without saving it::
iti1480a-capture | iti1480a-display -f
To watch a running capture and save it for later analysis::
iti1480a-capture | iti1480a-display -ft captured.usb
By default, iti1480a-display hides a lot of verbose events, like NAK'ed and SOF
transactions, or EOP events. You can tweak its filtering using -q (quieter) and
-v (more verbose). Default verbosity level is 0, -q decrements it and -v
increments it. Verbosity levels go from -1 (most quiet) to 4 (most verbose).
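For instance, combining the options shown above, a slightly more verbose live
view can be obtained with::
  iti1480a-capture | iti1480a-display -f -v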
Example outputs: https://github.com/vpelletier/ITI1480A-linux/tree/master/examples
Red timestamps mean that output is detected as being non-chronological. This
happens for implementation detail reasons, and is considered a bug
(`issue #4`_).
.. _Python: http://www.python.org/
.. _pypy: http://www.pypy.org/
.. _libusb: http://www.libusb.org/wiki/libusb-1.0
.. _python-libusb1: http://pypi.python.org/pypi/libusb1
.. _cycfx2prog: http://www.triplespark.net/elec/periph/USB-FX2/software/
.. _1480A USB Protocol Analyzer Software: http://www.internationaltestinstruments.com/
.. _4.1.0b ultitest.rbf: http://www.internationaltestinstruments.com/Downloads/UlpiTest.rbf
.. _ply: http://www.dabeaz.com/ply/
.. _fx2lib: https://github.com/djmuhlestein/fx2lib
.. _sdcc: http://sdcc.sourceforge.net
.. _issue #4: https://github.com/vpelletier/ITI1480A-linux/issues/4
/mrv-1.0.2-stable.zip/mrv-1.0.2-stable/mrv/maya/scene.py
__docformat__ = "restructuredtext"
import util as mutil
import mrv.util as util
import maya.OpenMaya as api
import maya.cmds as cmds
from mrv.path import make_path
import inspect
__all__ = [ 'Scene' ]
class _SceneEvent( mutil.CallbackEventBase ):
""" Implements Scene Callbacks"""
_checkCBSet = set( ( api.MSceneMessage.kBeforeNewCheck,
api.MSceneMessage.kBeforeSaveCheck ) )
_checkFileCBSet = set( ( api.MSceneMessage.kBeforeImportCheck,
api.MSceneMessage.kBeforeOpenCheck,
api.MSceneMessage.kBeforeExportCheck,
api.MSceneMessage.kBeforeReferenceCheck,
api.MSceneMessage.kBeforeLoadReferenceCheck ) )
#( Configuration
use_weakref = False
remove_on_error = True
weakref_sender = True
#) END configuration
# get the proper registration method
def _getRegisterFunction(self, eventID):
reg_method = api.MSceneMessage.addCallback
if eventID in self._checkCBSet:
reg_method = api.MSceneMessage.addCheckCallback
elif eventID in self._checkFileCBSet:
reg_method = api.MSceneMessage.addCheckFileCallback
# END find registration method
return reg_method
# END SceneEvent
class Scene( util.Singleton, util.EventSender ):
"""Singleton Class allowing access to the maya scene
    You can register all events available in MSceneMessage easily using the following
syntax:
>>> scene.beforeSoftwareRender = myFunctionObject
"""
kFileTypeMap = { "" : "mayaAscii", # treat untitled scenes as ma
".ma" : "mayaAscii",
".mb" : "mayaBinary" }
#{ Events
sender_as_argument = False
# create events from 'kEventName', creating a corresponding event named
# 'eventName'
for eidName, eid in ((n,v) for n,v in inspect.getmembers(api.MSceneMessage) if n.startswith('k')):
locals()[util.uncapitalize(eidName[1:])] = _SceneEvent(eid)
# END for each message id to create
#} END events
#{ Edit Methods
@classmethod
def open( cls, scenepath=None, force=False, **kwargs ):
""" Open the scene at the given scenepath
:param scenepath: The path to the file to be opened
        If None, the currently loaded file will be reopened
        :param force: if True, the new scene will be loaded even if the currently
        loaded scene contains unsaved changes
:param kwargs: passed to *cmds.file*
:return: a Path to the loaded scene"""
if not scenepath:
scenepath = cls.name()
        # NOTE: it will return the last loaded reference instead of the loaded file - let's fix this!
sourcePath = make_path( scenepath )
kwargs.pop('open', kwargs.pop('o', None))
kwargs.pop('force', kwargs.pop('f', None))
lastReference = cmds.file( sourcePath.abspath(), open=1, force=force, **kwargs )
return make_path( sourcePath )
@classmethod
def new( cls, force = False, **kwargs ):
""" Create a new scene
:param force: if True, the new scene will be created even though there
are unsaved modifications
:param kwargs: passed to *cmds.file*
:return: Path with name of the new file"""
kwargs.pop('new', kwargs.pop('n', None))
kwargs.pop('force', kwargs.pop('f', None))
return make_path( cmds.file( new = True, force = force, **kwargs ) )
@classmethod
def rename( cls, scenepath ):
"""Rename the currently loaded file to be the file at scenepath
:param scenepath: string or Path pointing describing the new location of the scene.
:return: Path to scenepath
:note: as opposed to the normal file -rename it will also adjust the extension
:raise RuntimeError: if the scene's extension is not supported."""
scenepath = make_path(scenepath)
try:
cmds.file( rename = scenepath.expandvars() )
cmds.file( type = cls.kFileTypeMap[ scenepath.ext() ] )
except KeyError:
raise RuntimeError( "Unsupported filetype of: " + scenepath )
# END exception handling
return scenepath
@classmethod
def save( cls, scenepath=None, autodeleteUnknown = False, **kwargs ):
"""Save the currently opened scene under scenepath in the respective format
:param scenepath: if None, the currently opened scene will be saved, otherwise
the name will be changed. Paths leading to the file will automatically be created.
:param autodeleteUnknown: if true, unknown nodes will automatically be deleted
before an attempt is made to change the maya file's type
:param kwargs: passed to cmds.file
:return: Path at which the scene has been saved."""
if scenepath is None or scenepath == "":
scenepath = cls.name( )
scenepath = make_path( scenepath )
curscene = cls.name()
try :
filetype = cls.kFileTypeMap[ scenepath.ext() ]
curscenetype = cls.kFileTypeMap[ curscene.ext() ]
except KeyError:
raise RuntimeError( "Unsupported filetype of: " + scenepath )
# is it a save as ?
if curscene != scenepath:
cls.rename(scenepath)
# assure path exists
parentdir = scenepath.dirname( )
if not parentdir.exists( ):
parentdir.makedirs( )
# END assure parent path exists
# delete unknown before changing types ( would result in an error otherwise )
if autodeleteUnknown and curscenetype != filetype:
cls.deleteUnknownNodes()
# END handle unkonwn nodes
# safe the file
kwargs.pop('save', kwargs.pop('s', None))
kwargs.pop('type', kwargs.pop('typ', None))
try:
return make_path( cmds.file( save=True, type=filetype, **kwargs ) )
except RuntimeError:
if curscene != cls.name():
cls.rename(curscene)
# END restore previous name on error
raise
# END exception handling
@classmethod
def export(cls, outputFile, nodeListOrIterable=None, **kwargs):
"""Export the given nodes or everything into the file at path
:param outputFile: Path object or path string to which the data should
be written to. Parent directories will be created as needed
:param nodeListOrIterable: if None, everything will be exported.
Otherwise it may be an MSelectionList ( recommended ), or a list of
Nodes, MObjects or MDagPaths
:param kwargs: passed to cmds.file, see the mel docs for modifying flags
:return: Path to which the data was exported"""
outputFile = make_path(outputFile)
if not outputFile.dirname().isdir():
outputFile.dirname().makedirs()
# END create parent dirs
prev_selection = None
if nodeListOrIterable is None:
kwargs['exportAll'] = True
else:
# export selected mode
kwargs['exportSelected'] = True
prev_selection = api.MSelectionList()
api.MGlobal.getActiveSelectionList(prev_selection)
import nt
nt.select(nt.toSelectionList(nodeListOrIterable))
# END handle nodes
typ = kwargs.pop('type', kwargs.pop('typ', cls.kFileTypeMap.get(outputFile.ext(), None)))
if typ is None:
raise RuntimeError("Invalid type in %s" % outputFile)
# END handle type
try:
cmds.file(outputFile, type=typ, **kwargs)
return outputFile
finally:
if prev_selection is not None:
api.MGlobal.setActiveSelectionList(prev_selection)
# END if we have a selection to restore
# END handle selection
#} END edit methods
#{ Utilities
@classmethod
def deleteUnknownNodes( cls ):
"""Deletes all unknown nodes in the scene
:note: only do this if you are about to change the type of the scene during
save or export - otherwise the operation would fail if there are still unknown nodes
in the scene"""
unknownNodes = cmds.ls( type="unknown" ) # using mel is the fastest here
if unknownNodes:
cmds.delete( unknownNodes )
#} END utilities
#{ Query Methods
@classmethod
def name( cls ):
return make_path( cmds.file( q=1, exn=1 ) )
@classmethod
def isModified( cls ):
return cmds.file( q=1, amf=True )
#} END query methods
# END SCENE | PypiClean |
/Datargsing-0.2.15-py3-none-any.whl/datargsing/datargsing.py | from datargsing.datargsing_core import datargsing_Complete, datargsing_Error, JSON_Manage as jmC, CSV_Manage as cmC, CSV_JSON_Convert as cjcC
from datargsing.datargsing_tools import datargsing_Failure, Datargsing_Engine
from random import randint
from datargsing.datargsing_version import __version__
class CSV_JSON_Manager:
"""
Datargsing CSV And JSON Managing and Converting Class
"""
def __init__(self):
"""
Datargsing CSV And JSON Managing and Converting Class
"""
self.JM = jmC()
self.CM = cmC()
self.CJC = cjcC()
def get_from_json(self, path: str, debug: bool = False) -> dict | datargsing_Error:
"""
Get From A JSON (Formatted/Like or .json) File:
-> path : (str) the path of the json (formatted/like or .json) file
-> debug : (bool) [False] a debug state for Error Class
=> Return a (dict) object or a (datargsing_Error) object {For more details: view (datargsing_Error) info}
"""
assert type(path) == str, "{ path } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path.endswith('.json'):
return self.JM.get_from_file(path=path,debug=debug)
else:
return self.JM.get_from_file_like_json(path=path,debug=debug)
def get_from_csv(self, path: str, separator: str = ',', debug: bool = False) -> tuple | datargsing_Error:
"""
Get From A CSV (Formatted/Like or .csv) File:
-> path : (str) the path of the csv (formatted/like or .csv) file
-> separator : (str) [','] the separator in the csv (formatted/like or .csv) file
-> debug : (bool) [False] a debug state for Error Class
=> Return a (tuple) object or a (datargsing_Error) object {For more details: view (datargsing_Error) info}
=-=-> output tuple format:
-> Index 0 : a list with descriptors/entries
-> Index 1 : a list of (sub-)list, each (sub-)list is a line under descriptors/entries
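=-=-> illustrative example (values shown as raw strings; exact typing depends on datargsing_core):
-> a file containing the lines "name,age", "alice,30", "bob,25" (default ',' separator)
-> would be returned as (['name', 'age'], [['alice', '30'], ['bob', '25']])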
"""
assert type(path) == str, "{ path } must be a str"
assert type(separator) == str, "{ separator } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path.endswith('.csv'):
return self.CM.get_from_file(path=path,separator=separator,debug=debug)
else:
return self.CM.get_from_file_like_csv(path=path,separator=separator,debug=debug)
def set_to_json(self, path: str, content: dict, debug: bool = False) -> datargsing_Complete | datargsing_Error:
"""
Set To A JSON (Formatted/Like or .json) File:
-> path : (str) the path of the json (formatted/like or .json) output file
-> content : (dict) the content to write into the json (formatted/like or .json) output file
-> debug : (bool) [False] a debug state for Error Class and Completion Class
=> Return a (datargsing_Complete) object or a (datargsing_Error) object {For more details: view (datargsing_Complete) info and/or (datargsing_Error) info}
"""
assert type(path) == str, "{ path } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path.endswith('.json'):
return self.JM.set_to_file(path=path,content=content,debug=debug)
else:
return self.JM.set_to_file_like_json(path=path,content=content,debug=debug)
def set_to_csv(self, path: str, content: tuple, separator: str = ',', debug: bool = False) -> datargsing_Complete | datargsing_Error:
"""
Set To A CSV (Formatted/Like or .csv) File:
-> path : (str) the path of the csv (formatted/like or .csv) output file
-> content : (tuple) the content of the csv (formatted/like or .csv) output file
-> separator : (str) [','] the separator in the csv (formatted/like or .csv) output file
-> debug : (bool) [False] a debug state for Error Class and Completion Class
=> Return a (datargsing_Complete) object or a (datargsing_Error) object {For more details: view (datargsing_Complete) info and/or (datargsing_Error) info}
=-=-> content tuple format:
-> Index 0 : a list with descriptors/entries
-> Index 1 : a list of (sub-)list, each (sub-)list is a line under descriptors/entries
"""
assert type(path) == str, "{ path } must be a str"
assert type(separator) == str, "{ separator } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path.endswith('.csv'):
return self.CM.set_to_file(path=path,content=content,separator=separator,debug=debug)
else:
return self.CM.set_to_file_like_csv(path=path,content=content,separator=separator,debug=debug)
def write_json_from_csv(self, path_csv: str, path_json: str, csv_separator: str = ',', debug: bool = False) -> datargsing_Complete | datargsing_Error:
"""
Write The JSON Formatted CSV Content From A CSV (Formatted/Like or .csv) File To A JSON (Formatted/Like or .json) File:
-> path_csv : (str) the path of the csv (formatted/like or .csv) file
-> path_json : (str) the path of the json (formatted/like or .json) output file
-> csv_separator : (str) [','] the separator in the csv (formatted/like or .csv) file
-> debug : (bool) [False] a debug state for Error Class and Completion Class
=> Return a (datargsing_Complete) object or a (datargsing_Error) object {For more details: view (datargsing_Complete) info and/or (datargsing_Error) info}
"""
assert type(path_csv) == str, "{ path_csv } must be a str"
assert type(path_json) == str, "{ path_json } must be a str"
assert type(csv_separator) == str, "{ csv_separator } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path_csv.endswith('.csv'):
if path_json.endswith('.json'):
return self.CJC.csv_json_get_set(path_csv=path_csv,path_json=path_json,csv_separator=csv_separator,debug=debug)
else:
return self.CJC.csv_json_like_get_set(path_csv=path_csv,path_json=path_json,csv_separator=csv_separator,debug=debug)
else:
if path_json.endswith('.json'):
return self.CJC.csv_like_json_get_set(path_csv=path_csv,path_json=path_json,csv_separator=csv_separator,debug=debug)
else:
return self.CJC.csv_like_json_like_get_set(path_csv=path_csv,path_json=path_json,csv_separator=csv_separator,debug=debug)
def write_csv_from_json(self, path_json: str, path_csv: str, csv_separator: str = ',', debug: bool = False) -> datargsing_Complete | datargsing_Error:
"""
Write The CSV Formatted JSON Content From A JSON (Formatted/Like or .json) File To A CSV (Formatted/Like or .csv) File:
-> path_json : (str) the path of the json (formatted/like or .json) file
-> path_csv : (str) the path of the csv (formatted/like or .csv) output file
-> csv_separator : (str) [','] the separator in the csv (formatted/like or .csv) output file
-> debug : (bool) [False] a debug state for Error Class and Completion Class
=> Return a (datargsing_Complete) object or a (datargsing_Error) object {For more details: view (datargsing_Complete) info and/or (datargsing_Error) info}
"""
assert type(path_csv) == str, "{ path_csv } must be a str"
assert type(path_json) == str, "{ path_json } must be a str"
assert type(csv_separator) == str, "{ csv_separator } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path_json.endswith('.json'):
if path_csv.endswith('.csv'):
return self.CJC.json_csv_get_set(path_json=path_json,path_csv=path_csv,csv_separator=csv_separator,debug=debug)
else:
return self.CJC.json_csv_like_get_set(path_json=path_json,path_csv=path_csv,csv_separator=csv_separator,debug=debug)
else:
if path_csv.endswith('.csv'):
return self.CJC.json_like_csv_get_set(path_json=path_json,path_csv=path_csv,csv_separator=csv_separator,debug=debug)
else:
return self.CJC.json_like_csv_like_get_set(path_json=path_json,path_csv=path_csv,csv_separator=csv_separator,debug=debug)
def get_json_from_csv(self, path_csv: str, csv_separator: str = ',', debug: bool = False) -> dict | datargsing_Error:
"""
Get The JSON Formatted CSV Content From A CSV (Formatted/Like or .csv) File:
-> path_csv : (str) the path of the csv (formatted/like or .csv) file
-> csv_separator : (str) [','] the separator in the csv (formatted/like or .csv) file
-> debug : (bool) [False] a debug state for Error Class
=> Return a (dict) object or a (datargsing_Error) object {For more details: view (datargsing_Error) info}
"""
assert type(path_csv) == str, "{ path_csv } must be a str"
assert type(csv_separator) == str, "{ csv_separator } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path_csv.endswith('.csv'):
return self.CJC.csv_json_get(path_csv=path_csv,csv_separator=csv_separator,debug=debug)
else:
return self.CJC.csv_like_json_get(path_csv=path_csv,csv_separator=csv_separator,debug=debug)
def get_csv_from_json(self, path_json: str, debug: bool = False) -> tuple | datargsing_Error:
"""
Get The CSV Formatted JSON Content From A JSON (Formatted/Like or .json) File:
-> path_json : (str) the path of the json (formatted/like or .json) file
-> debug : (bool) [False] a debug state for Error Class
=> Return a (tuple) object or a (datargsing_Error) object {For more details: view (datargsing_Error) info}
=-=-> output tuple format:
-> Index 0 : a list with descriptors/entries
-> Index 1 : a list of (sub-)list, each (sub-)list is a line under descriptors/entries
"""
assert type(path_json) == str, "{ path_json } must be a str"
assert type(debug) == bool, "{ debug } must be a bool"
if path_json.endswith('.json'):
return self.CJC.json_csv_get(path_json=path_json,debug=debug)
else:
return self.CJC.json_like_csv_get(path_json=path_json,debug=debug)
class Tools:
"""
Datargsing Tools Class
"""
def __init__(self):
"""
Datargsing Tools Class
"""
self.de = Datargsing_Engine()
def location(self, main: str, wanted: str) -> int | list[int] | datargsing_Failure:
"""
Return the single index (or all indexes) of {wanted} in {main}
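e.g. location(main="banana", wanted="na") should give [2, 4] (all indexes); a
single occurrence is returned as a bare int and no match as a
datargsing_Failure, assuming Datargsing_Engine.locate_all behaves as implied
by count() below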
"""
assert type(main) == str, "{ main } must be a str"
assert type(wanted) == str, "{ wanted } must be a str"
temp = self.de.locate_all(main=main, wanted=wanted)
if type(temp) == datargsing_Failure:
return datargsing_Failure()
else:
return temp
def count(self, main: str, wanted: str) -> int:
"""
Return the number of {wanted} in {main}
"""
assert type(main) == str, "{ main } must be a str"
assert type(wanted) == str, "{ wanted } must be a str"
temp = self.location(main=main,wanted=wanted)
if type(temp) == datargsing_Failure:
return 0
elif type(temp) == int:
return 1
else:
return len(temp)
def get_one_random_location(self, main: str, wanted: str) -> int | datargsing_Failure:
"""
Return one random location of {wanted} in {main}
"""
assert type(main) == str, "{ main } must be a str"
assert type(wanted) == str, "{ wanted } must be a str"
temp = self.location(main=main,wanted=wanted)
if type(temp) == datargsing_Failure:
return datargsing_Failure()
elif type(temp) == int:
return temp
else:
return temp[randint(0,len(temp)-1)] | PypiClean |
/CudaPyInt-0.0.1.tar.gz/CudaPyInt-0.0.1/doc/build/html/_static/websupport.js | (function($) {
$.fn.autogrow = function() {
return this.each(function() {
var textarea = this;
$.fn.autogrow.resize(textarea);
$(textarea)
.focus(function() {
textarea.interval = setInterval(function() {
$.fn.autogrow.resize(textarea);
}, 500);
})
.blur(function() {
clearInterval(textarea.interval);
});
});
};
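// Resize helper: estimate the number of wrapped lines from each line's length
// relative to the textarea's column count, then set the element height to
// (lineCount + 1) * line-height.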
$.fn.autogrow.resize = function(textarea) {
var lineHeight = parseInt($(textarea).css('line-height'), 10);
var lines = textarea.value.split('\n');
var columns = textarea.cols;
var lineCount = 0;
$.each(lines, function() {
lineCount += Math.ceil(this.length / columns) || 1;
});
var height = lineHeight * (lineCount + 1);
$(textarea).css('height', height);
};
})(jQuery);
(function($) {
var comp, by;
function init() {
initEvents();
initComparator();
}
function initEvents() {
$('a.comment-close').live("click", function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
});
$('a.vote').live("click", function(event) {
event.preventDefault();
handleVote($(this));
});
$('a.reply').live("click", function(event) {
event.preventDefault();
openReply($(this).attr('id').substring(2));
});
$('a.close-reply').live("click", function(event) {
event.preventDefault();
closeReply($(this).attr('id').substring(2));
});
$('a.sort-option').live("click", function(event) {
event.preventDefault();
handleReSort($(this));
});
$('a.show-proposal').live("click", function(event) {
event.preventDefault();
showProposal($(this).attr('id').substring(2));
});
$('a.hide-proposal').live("click", function(event) {
event.preventDefault();
hideProposal($(this).attr('id').substring(2));
});
$('a.show-propose-change').live("click", function(event) {
event.preventDefault();
showProposeChange($(this).attr('id').substring(2));
});
$('a.hide-propose-change').live("click", function(event) {
event.preventDefault();
hideProposeChange($(this).attr('id').substring(2));
});
$('a.accept-comment').live("click", function(event) {
event.preventDefault();
acceptComment($(this).attr('id').substring(2));
});
$('a.delete-comment').live("click", function(event) {
event.preventDefault();
deleteComment($(this).attr('id').substring(2));
});
$('a.comment-markup').live("click", function(event) {
event.preventDefault();
toggleCommentMarkupBox($(this).attr('id').substring(2));
});
}
/**
* Set comp, which is a comparator function used for sorting and
* inserting comments into the list.
*/
function setComparator() {
// If the first three letters are "asc", sort in ascending order
// and remove the prefix.
if (by.substring(0,3) == 'asc') {
var i = by.substring(3);
comp = function(a, b) { return a[i] - b[i]; };
} else {
// Otherwise sort in descending order.
comp = function(a, b) { return b[by] - a[by]; };
}
// Reset link styles and format the selected sort option.
$('a.sel').attr('href', '#').removeClass('sel');
$('a.by' + by).removeAttr('href').addClass('sel');
}
/**
* Create a comp function. If the user has preferences stored in
* the sortBy cookie, use those, otherwise use the default.
*/
function initComparator() {
by = 'rating'; // Default to sort by rating.
// If the sortBy cookie is set, use that instead.
if (document.cookie.length > 0) {
var start = document.cookie.indexOf('sortBy=');
if (start != -1) {
start = start + 7;
var end = document.cookie.indexOf(";", start);
if (end == -1) {
end = document.cookie.length;
by = unescape(document.cookie.substring(start, end));
}
}
}
setComparator();
}
/**
* Show a comment div.
*/
function show(id) {
$('#ao' + id).hide();
$('#ah' + id).show();
var context = $.extend({id: id}, opts);
var popup = $(renderTemplate(popupTemplate, context)).hide();
popup.find('textarea[name="proposal"]').hide();
popup.find('a.by' + by).addClass('sel');
var form = popup.find('#cf' + id);
form.submit(function(event) {
event.preventDefault();
addComment(form);
});
$('#s' + id).after(popup);
popup.slideDown('fast', function() {
getComments(id);
});
}
/**
* Hide a comment div.
*/
function hide(id) {
$('#ah' + id).hide();
$('#ao' + id).show();
var div = $('#sc' + id);
div.slideUp('fast', function() {
div.remove();
});
}
/**
* Perform an ajax request to get comments for a node
* and insert the comments into the comments tree.
*/
function getComments(id) {
$.ajax({
type: 'GET',
url: opts.getCommentsURL,
data: {node: id},
success: function(data, textStatus, request) {
var ul = $('#cl' + id);
var speed = 100;
$('#cf' + id)
.find('textarea[name="proposal"]')
.data('source', data.source);
if (data.comments.length === 0) {
ul.html('<li>No comments yet.</li>');
ul.data('empty', true);
} else {
// If there are comments, sort them and put them in the list.
var comments = sortComments(data.comments);
speed = data.comments.length * 100;
appendComments(comments, ul);
ul.data('empty', false);
}
$('#cn' + id).slideUp(speed + 200);
ul.slideDown(speed);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem retrieving the comments.');
},
dataType: 'json'
});
}
/**
* Add a comment via ajax and insert the comment into the comment tree.
*/
function addComment(form) {
var node_id = form.find('input[name="node"]').val();
var parent_id = form.find('input[name="parent"]').val();
var text = form.find('textarea[name="comment"]').val();
var proposal = form.find('textarea[name="proposal"]').val();
if (text == '') {
showError('Please enter a comment.');
return;
}
// Disable the form that is being submitted.
form.find('textarea,input').attr('disabled', 'disabled');
// Send the comment to the server.
$.ajax({
type: "POST",
url: opts.addCommentURL,
dataType: 'json',
data: {
node: node_id,
parent: parent_id,
text: text,
proposal: proposal
},
success: function(data, textStatus, error) {
// Reset the form.
if (node_id) {
hideProposeChange(node_id);
}
form.find('textarea')
.val('')
.add(form.find('input'))
.removeAttr('disabled');
var ul = $('#cl' + (node_id || parent_id));
if (ul.data('empty')) {
$(ul).empty();
ul.data('empty', false);
}
insertComment(data.comment);
var ao = $('#ao' + node_id);
ao.find('img').attr({'src': opts.commentBrightImage});
if (node_id) {
// if this was a "root" comment, remove the commenting box
// (the user can get it back by reopening the comment popup)
$('#ca' + node_id).slideUp();
}
},
error: function(request, textStatus, error) {
form.find('textarea,input').removeAttr('disabled');
showError('Oops, there was a problem adding the comment.');
}
});
}
/**
* Recursively append comments to the main comment list and children
* lists, creating the comment tree.
*/
function appendComments(comments, ul) {
$.each(comments, function() {
var div = createCommentDiv(this);
ul.append($(document.createElement('li')).html(div));
appendComments(this.children, div.find('ul.comment-children'));
// To avoid stagnating data, don't store the comments children in data.
this.children = null;
div.data('comment', this);
});
}
/**
* After adding a new comment, it must be inserted in the correct
* location in the comment tree.
*/
function insertComment(comment) {
var div = createCommentDiv(comment);
// To avoid stagnating data, don't store the comments children in data.
comment.children = null;
div.data('comment', comment);
var ul = $('#cl' + (comment.node || comment.parent));
var siblings = getChildren(ul);
var li = $(document.createElement('li'));
li.hide();
// Determine where in the parent's children list to insert this comment.
for(i=0; i < siblings.length; i++) {
if (comp(comment, siblings[i]) <= 0) {
$('#cd' + siblings[i].id)
.parent()
.before(li.html(div));
li.slideDown('fast');
return;
}
}
// If we get here, this comment rates lower than all the others,
// or it is the only comment in the list.
ul.append(li.html(div));
li.slideDown('fast');
}
function acceptComment(id) {
$.ajax({
type: 'POST',
url: opts.acceptCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
$('#cm' + id).fadeOut('fast');
$('#cd' + id).removeClass('moderate');
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem accepting the comment.');
}
});
}
function deleteComment(id) {
$.ajax({
type: 'POST',
url: opts.deleteCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
var div = $('#cd' + id);
if (data == 'delete') {
// Moderator mode: remove the comment and all children immediately
div.slideUp('fast', function() {
div.remove();
});
return;
}
// User mode: only mark the comment as deleted
div
.find('span.user-id:first')
.text('[deleted]').end()
.find('div.comment-text:first')
.text('[deleted]').end()
.find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
.remove();
var comment = div.data('comment');
comment.username = '[deleted]';
comment.text = '[deleted]';
div.data('comment', comment);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem deleting the comment.');
}
});
}
function showProposal(id) {
$('#sp' + id).hide();
$('#hp' + id).show();
$('#pr' + id).slideDown('fast');
}
function hideProposal(id) {
$('#hp' + id).hide();
$('#sp' + id).show();
$('#pr' + id).slideUp('fast');
}
function showProposeChange(id) {
$('#pc' + id).hide();
$('#hc' + id).show();
var textarea = $('#pt' + id);
textarea.val(textarea.data('source'));
$.fn.autogrow.resize(textarea[0]);
textarea.slideDown('fast');
}
function hideProposeChange(id) {
$('#hc' + id).hide();
$('#pc' + id).show();
var textarea = $('#pt' + id);
textarea.val('').removeAttr('disabled');
textarea.slideUp('fast');
}
function toggleCommentMarkupBox(id) {
$('#mb' + id).toggle();
}
/** Handle when the user clicks on a sort by link. */
function handleReSort(link) {
var classes = link.attr('class').split(/\s+/);
for (var i=0; i<classes.length; i++) {
if (classes[i] != 'sort-option') {
by = classes[i].substring(2);
}
}
setComparator();
// Save/update the sortBy cookie.
var expiration = new Date();
expiration.setDate(expiration.getDate() + 365);
document.cookie= 'sortBy=' + escape(by) +
';expires=' + expiration.toUTCString();
$('ul.comment-ul').each(function(index, ul) {
var comments = getChildren($(ul), true);
comments = sortComments(comments);
appendComments(comments, $(ul).empty());
});
}
/**
* Function to process a vote when a user clicks an arrow.
*/
function handleVote(link) {
if (!opts.voting) {
showError("You'll need to login to vote.");
return;
}
var id = link.attr('id');
if (!id) {
// Didn't click on one of the voting arrows.
return;
}
// If it is an unvote, the new vote value is 0,
// Otherwise it's 1 for an upvote, or -1 for a downvote.
var value = 0;
if (id.charAt(1) != 'u') {
value = id.charAt(0) == 'u' ? 1 : -1;
}
// The data to be sent to the server.
var d = {
comment_id: id.substring(2),
value: value
};
// Swap the vote and unvote links.
link.hide();
$('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
.show();
// The div the comment is displayed in.
var div = $('div#cd' + d.comment_id);
var data = div.data('comment');
// If this is not an unvote, and the other vote arrow has
// already been pressed, unpress it.
if ((d.value !== 0) && (data.vote === d.value * -1)) {
$('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
$('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
}
// Update the comments rating in the local data.
data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
data.vote = d.value;
div.data('comment', data);
// Change the rating text.
div.find('.rating:first')
.text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
// Send the vote information to the server.
$.ajax({
type: "POST",
url: opts.processVoteURL,
data: d,
error: function(request, textStatus, error) {
showError('Oops, there was a problem casting that vote.');
}
});
}
/**
* Open a reply form used to reply to an existing comment.
*/
function openReply(id) {
// Swap out the reply link for the hide link
$('#rl' + id).hide();
$('#cr' + id).show();
// Add the reply li to the children ul.
var div = $(renderTemplate(replyTemplate, {id: id})).hide();
$('#cl' + id)
.prepend(div)
// Setup the submit handler for the reply form.
.find('#rf' + id)
.submit(function(event) {
event.preventDefault();
addComment($('#rf' + id));
closeReply(id);
})
.find('input[type=button]')
.click(function() {
closeReply(id);
});
div.slideDown('fast', function() {
$('#rf' + id).find('textarea').focus();
});
}
/**
* Close the reply form opened with openReply.
*/
function closeReply(id) {
// Remove the reply div from the DOM.
$('#rd' + id).slideUp('fast', function() {
$(this).remove();
});
// Swap out the hide link for the reply link
$('#cr' + id).hide();
$('#rl' + id).show();
}
/**
* Recursively sort a tree of comments using the comp comparator.
*/
function sortComments(comments) {
comments.sort(comp);
$.each(comments, function() {
this.children = sortComments(this.children);
});
return comments;
}
/**
* Get the children comments from a ul. If recursive is true,
* recursively include children's children.
*/
function getChildren(ul, recursive) {
var children = [];
ul.children().children("[id^='cd']")
.each(function() {
var comment = $(this).data('comment');
if (recursive)
comment.children = getChildren($(this).find('#cl' + comment.id), true);
children.push(comment);
});
return children;
}
/** Create a div to display a comment in. */
function createCommentDiv(comment) {
if (!comment.displayed && !opts.moderator) {
return $('<div class="moderate">Thank you! Your comment will show up '
+ 'once it is has been approved by a moderator.</div>');
}
// Prettify the comment rating.
comment.pretty_rating = comment.rating + ' point' +
(comment.rating == 1 ? '' : 's');
// Make a class (for displaying not yet moderated comments differently)
comment.css_class = comment.displayed ? '' : ' moderate';
// Create a div for this comment.
var context = $.extend({}, opts, comment);
var div = $(renderTemplate(commentTemplate, context));
// If the user has voted on this comment, highlight the correct arrow.
if (comment.vote) {
var direction = (comment.vote == 1) ? 'u' : 'd';
div.find('#' + direction + 'v' + comment.id).hide();
div.find('#' + direction + 'u' + comment.id).show();
}
if (opts.moderator || comment.text != '[deleted]') {
div.find('a.reply').show();
if (comment.proposal_diff)
div.find('#sp' + comment.id).show();
if (opts.moderator && !comment.displayed)
div.find('#cm' + comment.id).show();
if (opts.moderator || (opts.username == comment.username))
div.find('#dc' + comment.id).show();
}
return div;
}
/**
* A simple template renderer. Placeholders such as <%id%> are replaced
* by context['id'] with items being escaped. Placeholders such as <#id#>
* are not escaped.
*/
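// For instance, renderTemplate('<p><%username%></p>', {username: '<b>x</b>'})
// should produce '<p>&lt;b&gt;x&lt;/b&gt;</p>' (escaped), whereas a '<#text#>'
// placeholder would keep the raw HTML.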
function renderTemplate(template, context) {
var esc = $(document.createElement('div'));
function handle(ph, escape) {
var cur = context;
$.each(ph.split('.'), function() {
cur = cur[this];
});
return escape ? esc.text(cur || "").html() : cur;
}
return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
return handle(arguments[2], arguments[1] == '%' ? true : false);
});
}
/** Flash an error message briefly. */
function showError(message) {
$(document.createElement('div')).attr({'class': 'popup-error'})
.append($(document.createElement('div'))
.attr({'class': 'error-message'}).text(message))
.appendTo('body')
.fadeIn("slow")
.delay(2000)
.fadeOut("slow");
}
/** Add a link the user uses to open the comments popup. */
$.fn.comment = function() {
return this.each(function() {
var id = $(this).attr('id').substring(1);
var count = COMMENT_METADATA[id];
var title = count + ' comment' + (count == 1 ? '' : 's');
var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
var addcls = count == 0 ? ' nocomment' : '';
$(this)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-open' + addcls,
id: 'ao' + id
})
.append($(document.createElement('img')).attr({
src: image,
alt: 'comment',
title: title
}))
.click(function(event) {
event.preventDefault();
show($(this).attr('id').substring(2));
})
)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-close hidden',
id: 'ah' + id
})
.append($(document.createElement('img')).attr({
src: opts.closeCommentImage,
alt: 'close',
title: 'close'
}))
.click(function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
})
);
});
};
var opts = {
processVoteURL: '/_process_vote',
addCommentURL: '/_add_comment',
getCommentsURL: '/_get_comments',
acceptCommentURL: '/_accept_comment',
deleteCommentURL: '/_delete_comment',
commentImage: '/static/_static/comment.png',
closeCommentImage: '/static/_static/comment-close.png',
loadingImage: '/static/_static/ajax-loader.gif',
commentBrightImage: '/static/_static/comment-bright.png',
upArrow: '/static/_static/up.png',
downArrow: '/static/_static/down.png',
upArrowPressed: '/static/_static/up-pressed.png',
downArrowPressed: '/static/_static/down-pressed.png',
voting: false,
moderator: false
};
if (typeof COMMENT_OPTIONS != "undefined") {
opts = jQuery.extend(opts, COMMENT_OPTIONS);
}
var popupTemplate = '\
<div class="sphinx-comments" id="sc<%id%>">\
<p class="sort-options">\
Sort by:\
<a href="#" class="sort-option byrating">best rated</a>\
<a href="#" class="sort-option byascage">newest</a>\
<a href="#" class="sort-option byage">oldest</a>\
</p>\
<div class="comment-header">Comments</div>\
<div class="comment-loading" id="cn<%id%>">\
loading comments... <img src="<%loadingImage%>" alt="" /></div>\
<ul id="cl<%id%>" class="comment-ul"></ul>\
<div id="ca<%id%>">\
<p class="add-a-comment">Add a comment\
(<a href="#" class="comment-markup" id="ab<%id%>">markup</a>):</p>\
<div class="comment-markup-box" id="mb<%id%>">\
reStructured text markup: <i>*emph*</i>, <b>**strong**</b>, \
<tt>``code``</tt>, \
code blocks: <tt>::</tt> and an indented block after blank line</div>\
<form method="post" id="cf<%id%>" class="comment-form" action="">\
<textarea name="comment" cols="80"></textarea>\
<p class="propose-button">\
<a href="#" id="pc<%id%>" class="show-propose-change">\
Propose a change ▹\
</a>\
<a href="#" id="hc<%id%>" class="hide-propose-change">\
Propose a change ▿\
</a>\
</p>\
<textarea name="proposal" id="pt<%id%>" cols="80"\
spellcheck="false"></textarea>\
<input type="submit" value="Add comment" />\
<input type="hidden" name="node" value="<%id%>" />\
<input type="hidden" name="parent" value="" />\
</form>\
</div>\
</div>';
var commentTemplate = '\
<div id="cd<%id%>" class="sphinx-comment<%css_class%>">\
<div class="vote">\
<div class="arrow">\
<a href="#" id="uv<%id%>" class="vote" title="vote up">\
<img src="<%upArrow%>" />\
</a>\
<a href="#" id="uu<%id%>" class="un vote" title="vote up">\
<img src="<%upArrowPressed%>" />\
</a>\
</div>\
<div class="arrow">\
<a href="#" id="dv<%id%>" class="vote" title="vote down">\
<img src="<%downArrow%>" id="da<%id%>" />\
</a>\
<a href="#" id="du<%id%>" class="un vote" title="vote down">\
<img src="<%downArrowPressed%>" />\
</a>\
</div>\
</div>\
<div class="comment-content">\
<p class="tagline comment">\
<span class="user-id"><%username%></span>\
<span class="rating"><%pretty_rating%></span>\
<span class="delta"><%time.delta%></span>\
</p>\
<div class="comment-text comment"><#text#></div>\
<p class="comment-opts comment">\
<a href="#" class="reply hidden" id="rl<%id%>">reply ▹</a>\
<a href="#" class="close-reply" id="cr<%id%>">reply ▿</a>\
<a href="#" id="sp<%id%>" class="show-proposal">proposal ▹</a>\
<a href="#" id="hp<%id%>" class="hide-proposal">proposal ▿</a>\
<a href="#" id="dc<%id%>" class="delete-comment hidden">delete</a>\
<span id="cm<%id%>" class="moderation hidden">\
<a href="#" id="ac<%id%>" class="accept-comment">accept</a>\
</span>\
</p>\
<pre class="proposal" id="pr<%id%>">\
<#proposal_diff#>\
</pre>\
<ul class="comment-children" id="cl<%id%>"></ul>\
</div>\
<div class="clearleft"></div>\
</div>\
</div>';
var replyTemplate = '\
<li>\
<div class="reply-div" id="rd<%id%>">\
<form id="rf<%id%>">\
<textarea name="comment" cols="80"></textarea>\
<input type="submit" value="Add reply" />\
<input type="button" value="Cancel" />\
<input type="hidden" name="parent" value="<%id%>" />\
<input type="hidden" name="node" value="" />\
</form>\
</div>\
</li>';
$(document).ready(function() {
init();
});
})(jQuery);
$(document).ready(function() {
// add comment anchors for all paragraphs that are commentable
$('.sphinx-has-comment').comment();
// highlight search words in search results
$("div.context").each(function() {
var params = $.getQueryParameters();
var terms = (params.q) ? params.q[0].split(/\s+/) : [];
var result = $(this);
$.each(terms, function() {
result.highlightText(this.toLowerCase(), 'highlighted');
});
});
// directly open comment window if requested
var anchor = document.location.hash;
if (anchor.substring(0, 9) == '#comment-') {
$('#ao' + anchor.substring(9)).click();
document.location.hash = '#s' + anchor.substring(9);
}
}); | PypiClean |
/6D657461666C6F77-0.0.17.tar.gz/6D657461666C6F77-0.0.17/metaflow/multicore_utils.py | import sys
import os
import traceback
from itertools import islice
from multiprocessing import cpu_count
from tempfile import NamedTemporaryFile
try:
# Python 2
import cPickle as pickle
except:
# Python 3
import pickle
# This module reimplements select functions from the standard
# Python multiprocessing module.
#
# Three reasons why:
#
# 1) Multiprocessing has open bugs, e.g. https://bugs.python.org/issue29759
# 2) Work around limits, like the 32MB object limit in Queue, without
# introducing an external dependency like joblib.
# 3) Supports closures and lambdas in contrast to multiprocessing.
class MulticoreException(Exception):
pass
def _spawn(func, arg, dir):
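# _spawn forks a child process that evaluates func(arg); the parent returns
# immediately with (child_pid, tempfile_path), while the child pickles the
# return value into that tempfile and terminates via os._exit without ever
# returning from this function.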
with NamedTemporaryFile(prefix='parallel_map_',
dir=dir,
delete=False) as tmpfile:
output_file = tmpfile.name
# make sure stdout and stderr are flushed before forking. Otherwise
# we may print multiple copies of the same output
sys.stderr.flush()
sys.stdout.flush()
pid = os.fork()
if pid:
return pid, output_file
else:
try:
exit_code = 1
ret = func(arg)
with open(output_file, 'wb') as f:
pickle.dump(ret, f, protocol=pickle.HIGHEST_PROTOCOL)
exit_code = 0
except:
# we must not let any exceptions escape this function
# which might trigger unintended side-effects
traceback.print_exc()
finally:
sys.stderr.flush()
sys.stdout.flush()
# we can't use sys.exit(0) here since it raises SystemExit
# that may have unintended side-effects (e.g. triggering
# finally blocks).
os._exit(exit_code)
def parallel_imap_unordered(func, iterable, max_parallel=None, dir=None):
if max_parallel is None:
max_parallel = cpu_count()
ret = []
args_iter = iter(iterable)
pids = [_spawn(func, arg, dir)
for arg in islice(args_iter, max_parallel)]
while pids:
pid, output_file = pids.pop()
if os.waitpid(pid, 0)[1]:
raise MulticoreException('Child failed')
with open(output_file, 'rb') as f:
yield pickle.load(f)
os.remove(output_file)
arg = list(islice(args_iter, 1))
if arg:
pids.insert(0, _spawn(func, arg[0], dir))
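# Illustrative usage sketch (not part of the original module); results are
# yielded without preserving input order (hence 'unordered'):
#
#   for result in parallel_imap_unordered(lambda x: x * x, range(8), max_parallel=4):
#       print(result)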
def parallel_map(func, iterable, **kwargs):
def wrapper(arg_with_idx):
idx, arg = arg_with_idx
return idx, func(arg)
res = parallel_imap_unordered(wrapper, enumerate(iterable), **kwargs)
return [r for idx, r in sorted(res)] | PypiClean |
/BitGlitter-2.0.0.tar.gz/BitGlitter-2.0.0/bitglitter/read/scan/scanutilities.py | import numpy
import math
def return_distance(raw_frame_rgb, expected_value):
return math.sqrt(((raw_frame_rgb[0] - expected_value[0]) ** 2) + ((raw_frame_rgb[1] - expected_value[1]) ** 2) +
((raw_frame_rgb[2] - expected_value[2]) ** 2))
def color_snap(raw_frame_rgb, palette_color_list):
"""This takes the average RGB value of the pixels within the block, then loops through the full color palette to
determine the 'closest' palette. These defined "snapped" values can then be used to convert the colors into bits.
"""
closest_palette_match = None
closest_distance = 500
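# 500 exceeds the largest possible RGB distance (sqrt(3 * 255 ** 2) ~ 441.7),
# so the first palette color examined always becomes the initial closest match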
for color in palette_color_list:
active_distance = return_distance(raw_frame_rgb, color)
if active_distance < closest_distance:
closest_palette_match = color
closest_distance = active_distance
return closest_palette_match
def scan_block(image, pixel_width, block_width_position, block_height_position):
"""This function is whats used to scan the blocks used. First the scan area is determined, and then each of the
pixels in that area appended to a list. An average of those values as type int is returned.
"""
if pixel_width < 5:
start_position_x = int(block_width_position * pixel_width)
end_position_x = int((block_width_position * pixel_width) + pixel_width - 1)
start_position_y = int(block_height_position * pixel_width)
end_position_y = int((block_height_position * pixel_width) + pixel_width - 1)
else:
start_position_x = int(round((block_width_position * pixel_width) + (pixel_width * .25), 1))
end_position_x = int(round(start_position_x + (pixel_width * .5), 1))
start_position_y = int(round((block_height_position * pixel_width) + (pixel_width * .25), 1))
end_position_y = int(round(start_position_y + (pixel_width * .5), 1))
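# numpy.flip with no axis argument reverses every axis of the slice; the spatial
# reversals do not affect the mean, while reversing the channel axis presumably
# converts OpenCV-style BGR data to RGB before averaging over both pixel axes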
numpy_output = numpy.flip(image[start_position_y:end_position_y, start_position_x:end_position_x]).mean(axis=(0, 1))
to_list_format = numpy_output.tolist()
for value in range(3):
to_list_format[value] = int(to_list_format[value])
return to_list_format | PypiClean |
/BlueWhale3_Bioinformatics-4.1.32-py3-none-any.whl/orangecontrib/bioinformatics/widgets/OWdictyExpress.py | from typing import Optional
from requests.exceptions import ConnectionError
from AnyQt.QtGui import QFont
from AnyQt.QtCore import Qt, QSize
from AnyQt.QtWidgets import QLabel, QTreeWidget, QTreeWidgetItem
from Orange.data import Table, StringVariable
from Orange.widgets import gui, settings
from Orange.widgets.widget import Msg, OWWidget
from Orange.widgets.utils.signals import Output
from Orange.widgets.utils.concurrent import ConcurrentWidgetMixin
from orangecontrib.bioinformatics import resolwe
from orangecontrib.bioinformatics.resolwe import genapi, connect
from orangecontrib.bioinformatics.ncbi.gene import ENTREZ_ID, GeneMatcher
from orangecontrib.bioinformatics.resolwe.utils import etc_to_table
from orangecontrib.bioinformatics.widgets.utils.data import (
TAX_ID,
GENE_ID_COLUMN,
GENE_ID_ATTRIBUTE,
GENE_AS_ATTRIBUTE_NAME,
)
from orangecontrib.bioinformatics.widgets.components.resolwe import SignIn, get_credential_manager
from orangecontrib.bioinformatics.i18n_config import *
def __(key):
return i18n.t('bioinformatics.owdictyExpress.' + key)
Labels = [
(" ", " "),
("var.project", __("label.project")),
("static.name", __("label.experiment")),
("static.cite", __("label.citation")),
("var.growth", __("label.growth")),
("var.treatment", __("label.treatment")),
("var.strain", __("label.strain")),
]
class OWdictyExpress(OWWidget, ConcurrentWidgetMixin):
name = __("name")
description = __("desc")
icon = "../widgets/icons/OWdictyExpress.svg"
want_main_area = True
priority = 20
class Inputs:
pass
class Outputs:
etc_data = Output("Data", Table, i18n.t("bioinformatics.common.data"))
class Error(OWWidget.Error):
unreachable_host = Msg(__("msg_host_not_reachable"))
invalid_credentials = Msg(__("msg_invalid_credential"))
gene_as_attr_name = settings.Setting(0)
selected_item = settings.Setting(None, schema_only=True)
auto_commit = settings.Setting(False, schema_only=True)
def __init__(self):
super().__init__()
ConcurrentWidgetMixin.__init__(self)
self._res: Optional[genapi.GenAPI] = None
self.organism = '44689'
self.server = 'https://dictyexpress.research.bcm.edu'
self.headerLabels = [x[1] for x in Labels]
self.searchString = ""
self.items = []
self.genapi_pub_auth = {
'url': genapi.DEFAULT_URL,
'username': genapi.DEFAULT_EMAIL,
'password': genapi.DEFAULT_PASSWD,
}
# Login Section
box = gui.widgetBox(self.controlArea, __("btn.sign_in"))
self.user_info = gui.label(box, self, '')
self.server_info = gui.label(box, self, '')
box = gui.widgetBox(box, orientation=Qt.Horizontal)
self.sign_in_btn = gui.button(box, self, __("btn.sign_in"), callback=self.sign_in, autoDefault=False)
self.sign_out_btn = gui.button(box, self, __("btn.sign_out"), callback=self.sign_out, autoDefault=False)
box = gui.widgetBox(self.controlArea, __("box_output"))
gui.radioButtonsInBox(
box, self, "gene_as_attr_name", [__("gbox_gene_row"), __("gbox_gen_column")], callback=self.invalidate
)
self.clear_cache_btn = gui.button(
self.controlArea, self, __("btn_clear_cache"), autoDefault=False, callback=self.clear_cache
)
gui.rubber(self.controlArea)
self.commit_button = gui.auto_commit(self.controlArea, self, "auto_commit", __("btn_commit"), box=False)
# Experiment Section
label = QLabel(__("label_available_project"))
my_font = QFont()
my_font.setBold(True)
label.setFont(my_font)
self.mainArea.layout().addWidget(label)
self.filter = gui.lineEdit(
self.mainArea, self, "searchString", __("row.filter"), callbackOnType=True, callback=self.search_update
)
self.experimentsWidget = QTreeWidget(
alternatingRowColors=True, rootIsDecorated=False, uniformRowHeights=True, sortingEnabled=True
)
self.experimentsWidget.setItemDelegateForColumn(0, gui.IndicatorItemDelegate(self, role=Qt.DisplayRole))
self.experimentsWidget.selectionModel().selectionChanged.connect(self.on_selection_changed)
self.experimentsWidget.setHeaderLabels(self.headerLabels)
self.mainArea.layout().addWidget(self.experimentsWidget)
self.sign_in(silent=True)
self.sizeHint()
def sizeHint(self):
return QSize(1400, 680)
@property
def res(self):
return self._res
@res.setter
def res(self, value: genapi.GenAPI):
if isinstance(value, genapi.GenAPI):
self._res = value
self.Error.clear()
self.reset()
self.load_experiments()
self.update_user_status()
self.Outputs.etc_data.send(None)
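# NOTE: assigning to `res` (property setter above) clears errors, resets the
# widget, reloads the experiment list, refreshes the user status and sends an
# empty output -- sign_in()/sign_out() rely on these side effects.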
def sign_in(self, silent=False):
dialog = SignIn(self, server_type='genesis')
if silent:
dialog.sign_in()
if dialog.resolwe_instance is not None:
self.res = dialog.resolwe_instance
else:
self.res = connect(**self.genapi_pub_auth, server_type=resolwe.GENESIS_PLATFORM)
if not silent and dialog.exec_():
self.res = dialog.resolwe_instance
def sign_out(self):
# Remove username and password
cm = get_credential_manager(resolwe.GENESIS_PLATFORM)
del cm.username
del cm.password
# Use public credentials when user signs out
self.res = connect(**self.genapi_pub_auth, server_type=resolwe.GENESIS_PLATFORM)
def update_user_status(self):
cm = get_credential_manager(resolwe.GENESIS_PLATFORM)
if cm.username:
user_info = __("row.user").format(cm.username)
self.sign_in_btn.setEnabled(False)
self.sign_out_btn.setEnabled(True)
else:
user_info = __("row.user").format("Anonymous")
self.sign_in_btn.setEnabled(True)
self.sign_out_btn.setEnabled(False)
self.user_info.setText(user_info)
self.server_info.setText(__("row.server").format(self.res._gen.url[8:]))
def clear_cache(self):
resolwe.GenAPI.clear_cache()
self.reset()
self.load_experiments()
def reset(self):
self.experimentsWidget.clear() # clear QTreeWidget
self.items = []
# self.lastSelected = None
self.searchString = ""
def search_update(self):
parts = self.searchString.split()
for item in self.items:
item.setHidden(not all(s in item for s in parts))
def on_exception(self, ex):
if isinstance(ex, ConnectionError) or isinstance(ex, ValueError):
self.Error.unreachable_host()
print(ex)
def on_done(self, results):
if isinstance(results, list):
self.load_tree_items(results)
elif isinstance(results, tuple):
self.send_to_output(results)
def load_experiments(self):
if self.res:
self.start(self.res.fetch_etc_objects)
def load_tree_items(self, list_of_exp):
self.items = [CustomTreeItem(self.experimentsWidget, item) for item in list_of_exp]
for i in range(len(self.headerLabels)):
self.experimentsWidget.resizeColumnToContents(i)
self.set_cached_indicator()
self.set_selected()
def set_selected(self):
for item in self.items:
if self.selected_item and item.gen_data_id == self.selected_item:
self.experimentsWidget.setCurrentItem(item)
def on_selection_changed(self):
self.invalidate()
def invalidate(self):
self.commit()
def send_to_output(self, result):
etc_json, table_name = result
# convert to table
data = etc_to_table(etc_json, bool(self.gene_as_attr_name))
# set table name
data.name = table_name
# match genes
gene_matcher = GeneMatcher(str(self.organism))
if not bool(self.gene_as_attr_name):
if 'Gene' in data.domain:
data = gene_matcher.match_table_column(data, 'Gene', StringVariable(ENTREZ_ID))
data.attributes[GENE_ID_COLUMN] = ENTREZ_ID
else:
data = gene_matcher.match_table_attributes(data)
data.attributes[GENE_ID_ATTRIBUTE] = ENTREZ_ID
# add table attributes
data.attributes[TAX_ID] = str(self.organism)
data.attributes[GENE_AS_ATTRIBUTE_NAME] = bool(self.gene_as_attr_name)
# reset cache indicators
self.set_cached_indicator()
# send data to the output signal
self.Outputs.etc_data.send(data)
def commit(self):
self.Error.clear()
selected_items = self.experimentsWidget.selectedItems() # get selected TreeItem
if len(selected_items) < 1:
self.Outputs.etc_data.send(None)
return
selected_item = selected_items[0]
self.selected_item = selected_item.gen_data_id
self.start(self.res.download_etc_data, selected_item.gen_data_id, table_name=selected_item.data_name)
def set_cached_indicator(self):
cached = self.res.get_cached_ids()
for item in self.items:
if item.gen_data_id in cached:
item.setData(0, Qt.DisplayRole, " ")
else:
item.setData(0, Qt.DisplayRole, "")
class CustomTreeItem(QTreeWidgetItem):
def __init__(self, parent, gen_data):
super(CustomTreeItem, self).__init__(parent) # Init super class (QtGui.QTreeWidgetItem )
self._gen_data = gen_data # GenData object
self.set_rows(self._gen_data.annotation) # set rows in QTreeWidget
def __contains__(self, text):
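# case-insensitive substring match across all columns of this row; this is what
# makes the plain `s in item` checks in OWdictyExpress.search_update() work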
return any(text.upper() in str(self.text(i)).upper() for i in range(self.columnCount()))
@property
def gen_data_id(self):
return self._gen_data.id
@property
def data_name(self):
try:
project = self._gen_data.var['project']
experiment = self._gen_data.static['name']
except (AttributeError, KeyError):
project = ''
experiment = ''
return '{} ({})'.format(project, experiment)
def set_rows(self, row):
for index, label in enumerate(Labels):
if index > 0:
try:
if type(row[label[0]]["value"]) == list:
self.setText(index, row[label[0]]["value"][0]["name"])
else:
self.setText(index, row[label[0]]["value"])
except (IndexError, KeyError):
self.setText(index, 'No data')
if __name__ == "__main__":
from orangewidget.utils.widgetpreview import WidgetPreview
WidgetPreview(OWdictyExpress).run() | PypiClean |
/ChecklistDSL-0.0.1.alpha.3.tar.gz/ChecklistDSL-0.0.1.alpha.3/checklistdsl/lex.py | import re
class Token(object):
"""
Represents a token matched by the lexer.
"""
def __init__(self, token, value, roles=None, size=None):
"""
token - the type of token this is.
value - the matched value.
roles - named roles who have authority to action the item.
size - the "size" of the heading. 1 = big, 6 = small.
"""
self.token = token
self.value = value
self.roles = roles
self.size = size
def __repr__(self):
return '%s: "%s"' % (self.token, self.value)
"""
A dictionary that contains the regex used to match tokens and the associated
token types.
"""
MATCHER = {
# == Heading == (becomes an h* element where * is number of equal signs)
'(?P<depth_start>=+)(?P<value>(\\s|\\w)*)(?P<depth_end>=+)': 'HEADING',
# // This is a comment (ignored)
'\/\/(?P<value>.*)': 'COMMENT',
# [] item 1 (becomes a check box)
'\[\] *(?P<roles>{.*}|) *(?P<value>.*)': 'AND_ITEM',
# () item 1 (becomes a radio button)
'\(\) *(?P<roles>{.*}|) *(?P<value>.*)': 'OR_ITEM',
# --- (becomes an <hr/>)
'^-{3,}$': 'BREAK',
# Some text (becomes a <p>)
'(?P<value>[^=\/\[\(].*)': 'TEXT'
}
def get_tokens(data):
"""
Given some raw data, return a list of matched tokens. This is an example of the
simplest possible lexer.
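For example, feeding it the two lines '== Shopping ==' and
'[] {alice} buy milk' should yield a HEADING token (value 'Shopping', size 2)
followed by an AND_ITEM token (value 'buy milk', roles ['alice']).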
"""
result = []
# Split on newline and throw away empty (un-needed) lines
split_by_lines = [line.strip() for line in data.split('\n')
if line.strip()]
for line in split_by_lines:
for regex in MATCHER.keys():
match = re.match(regex, line)
if match:
# Grab the named groups.
val = match.groupdict().get('value', '').strip()
roles = match.groupdict().get('roles', '').replace(
'{', '').replace('}', '').strip()
depth_start = match.groupdict().get('depth_start', '')
# Post process roles.
if roles:
roles = [role.lower().strip() for role in roles.split(',')]
else:
roles = None
# Post process depth_start to give the size of the heading.
if depth_start:
size = len(depth_start)
else:
size = None
# Instantiate the token depending on the match for the val
# named group.
if val:
token = Token(MATCHER[regex], val, roles=roles, size=size)
else:
token = Token(MATCHER[regex], match.string)
# Ignore comments
if token.token != 'COMMENT':
result.append(token)
break
return result | PypiClean |
/LFPy-2.3.tar.gz/LFPy-2.3/examples/bioRxiv281717/figure_7_8/example_parallel_network_parameters.py | import matplotlib
import os
if 'DISPLAY' not in os.environ.keys():
matplotlib.use('agg')
import os
import numpy as np
from scipy import stats
from glob import glob
import json
from parameters import ParameterSet
from mpi4py import MPI
import neuron
import sys
from urllib.request import urlopen
from example_parallel_network_methods import get_templatename, get_params, \
get_clipped_params, get_syn_params
import LFPy
stringType = 'U'
# set up MPI environment
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# load some neuron-interface files needed for the EPFL cell types
neuron.h.load_file("stdrun.hoc")
neuron.h.load_file("import3d.hoc")
#######################
# Functions
#######################
def get_pre_m_type(post):
'''Little helper function returning the most populous excitatory m_type
within the layer of m_type post, assuming this is representative of
excitatory external connections onto postsynaptic cells.'''
if post.startswith('L23'):
return 'L23_PC'
elif post.startswith('L4'):
return 'L4_PC'
elif post.startswith('L5'):
return 'L5_TTPC1'
elif post.startswith('L6'):
return 'L6_IPC'
#######################
# Parameters
#######################
# test mode (1 cell per pop, all-to-all connectivity)
TESTING = False
# Creating a NeuroTools.parameters.ParameterSet object for the main parameters
PSET = ParameterSet({})
# output file destination
if TESTING:
PSET.OUTPUTPATH = 'example_parallel_network_output_testing'
else:
PSET.OUTPUTPATH = 'example_parallel_network_output'
# input file paths
# PATHs to current cell-specific files and NMODL files
PSET.CWD = os.getcwd()
PSET.CELLPATH = 'hoc_combos_syn.1_0_10.allzips'
PSET.NMODL = 'hoc_combos_syn.1_0_10.allmods'
########################################################
# Simulation control
########################################################
PSET.dt = 2**-4 # simulation time step size (ms)
PSET.tstop = 1500. # simulation duration (ms)
PSET.v_init = -77. # membrane voltage(s) at t = 0 for all cells
PSET.celsius = 34. # global temperature setting affecting active channels
PSET.TRANSIENT = 500. # duration of startup transient
# population size scaling (multiplied with values in
# populationParams['POP_SIZE']):
PSET.POPSCALING = 1.
# global scaling of connection probabilities (to counteract POPSCALING)
PSET.CONNPROBSCALING = 1. / PSET.POPSCALING
# switch for fully connected network (do not use with large population sizes)
PSET.fully_connected = True if TESTING else False
'''
# bool flag switching LFP calculations on or off (faster)
PSET.COMPUTE_LFP = True
# bool flag switching ECoG calculation on or off
PSET.COMPUTE_ECOG = PSET.COMPUTE_LFP
# bool flag switching on calculations of electric current dipole moments
# per population
PSET.COMPUTE_P = PSET.COMPUTE_LFP
# bool flag switching on calculations of contributions to the extracellular
# potential per population
PSET.rec_pop_contributions = PSET.COMPUTE_LFP
'''
# downsample factor for timeseries plots
PSET.decimate_q = 10
# settings for filtered signals shown in plots (fc=100 Hz, lowpass)
PSET.filterargs = dict(
N=2,
Wn=100. *
2. *
PSET.dt /
1000 *
PSET.decimate_q,
btype='lowpass')
# Base NetworkCell arguments, morphology and template specific args is
# defined below.
cellParams = {
'passive': False,
'nsegs_method': None,
'v_init': PSET.v_init,
'tstart': 0,
'tstop': PSET.tstop,
'dt': PSET.dt,
'verbose': False,
'extracellular': False,
'delete_sections': False,
}
# some stimuli to activate the network
PSET.PointProcParams = {
'idx': 0,
'record_current': False,
'pptype': 'IClamp',
# 'amp' : 0.793, # amplitude parameter set later on
'dur': 1E6,
'delay': 0.,
}
# parameters for predicting extracellular potentials, specifying
# coordinates of electrodes and extracellular conductivity. So far only
# point contacts are supported.
PSET.electrodeParams = {
'x': np.zeros(16),
'y': np.zeros(16),
'z': np.linspace(-1500, 0, 16)[::-1],
'sigma': 0.3,
'n': 50,
'N': np.array([[0., 1., 0]] * 16),
'r': 5.,
'method': 'root_as_point',
}
# parameters for 4-sphere volume conductor model
# compute electrode positions on the outer radius for different angular offsets
_theta = np.linspace(-np.pi / 4, np.pi / 4, 9)
_x = 90000. * np.sin(_theta)
_y = np.zeros(_theta.size)
_z = 90000. * np.cos(_theta)
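# i.e., nine EEG-like contacts on the outer shell (radius 90000 um = 90 mm),
# spread over +/- 45 degrees in the xz-plane above the column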
PSET.foursphereParams = {
'radii': [79000., 80000., 85000., 90000.], # shell radii
'sigmas': [0.3, 1.5, 0.015, 0.3], # shell conductivity
'r_electrodes': np.c_[_x, _y, _z], # contact coordinates
}
# Optional arguments to Network.simulate() for computing extracellular
# contribution from passive leak, membrane capactitance and synaptic currents
PSET.NetworkSimulateArgs = {
'use_ipas': False,
'use_icap': False,
'use_isyn': False,
'to_memory': True,
}
# layer thickness top to bottom L1-L6, Markram et al. 2015 Fig 3A.
PSET.layer_data = np.array([('L1', 165., -82.5),
('L2', 149., -239.5),
('L3', 353., -490.5),
('L4', 190., -762.),
('L5', 525, -1119.5),
('L6', 700, -1732.)],
dtype=[('layer', f'|{stringType}2'),
('thickness', float), ('center', float)])
# Define electrode geometry corresponding to an ECoG electrode, where contact
# points have a radius r, surface normal vectors N, and ECoG is calculated as
# the average LFP in n random points on each contact:
PSET.ecogParameters = {
'sigma_S': 0., # CSF conductivity
'sigma_T': 0.3, # GM conductivity
'sigma_G': 0.3, # WM conductivity
'h': PSET.layer_data['thickness'].sum(),
'x': np.array([0.]), # x,y,z-coordinates of electrode contacts
'y': np.array([0.]),
'z': np.array([0.]),
# +PSET.layer_data[4]['thickness']/8,
'z_shift': -PSET.layer_data['thickness'].sum(),
'n': 500,
'r': 250, # ECoG radii are often 500-1000 um
'N': np.array([[0., 0., 1.]]),
'method': "pointsource",
}
# Main population parameters:
'''
PSET.populationParameters = np.array([
# Layer 4
# Excitatory
('L4_PC', 'cAD', 'L4_PC_cADpyr230_1', 2674,
dict(
radius=210,
loc=PSET.layer_data[3]['center'],
scale=100.,
cap=[
1078.,
97.]),
dict(x=np.pi / 2, y=0.),
['dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
# Inhibitory
('L4_LBC', 'dNAC', 'L4_LBC_dNAC222_1', 122,
dict(
radius=210,
loc=PSET.layer_data[3]['center'],
scale=100.,
cap=[
938.,
670]),
dict(x=np.pi / 2, y=0.),
['soma', 'dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
'''
PSET.populationParameters = np.array([
# Layer 5
# Excitatory
('L5_TTPC1', 'cAD', 'L5_TTPC1_cADpyr232_1', 2403,
dict(
radius=210,
loc=PSET.layer_data[4]['center'],
scale=125.,
cap=[
719,
73.]),
dict(x=np.pi / 2, y=0.),
['dend', 'apic'],
['dend', 'apic'],
0.1, 5.),
# Inhibitory
('L5_MC', 'bAC', 'L5_MC_bAC217_1', 395,
dict(
radius=210,
loc=PSET.layer_data[4]['center'],
scale=125.,
cap=[
378.,
890]),
dict(x=np.pi / 2, y=0.),
['soma', 'dend', 'apic'],
['dend', 'apic'],
0.125, 5.),
],
dtype=[('m_type', f'|{stringType}32'),
('e_type', f'|{stringType}32'),
('me_type', f'|{stringType}32'),
('POP_SIZE', 'i8'),
('pop_args', dict),
('rotation_args', dict),
('syn_section', list),
('extrinsic_input_section', list),
('extrinsic_input_density', 'f8'),
('extrinsic_input_frequency', 'f8')])
# column data:
# shortnames as used in pathway_*.json files
# names as used to denote individual cell types
# POP_SIZE : number of neurons for each morphological type as given on
# https://bbp.epfl.ch/nmc-portal/microcircuit
# pop_args : dict,
# radius, mean position (loc) and standard deviation (scale) of the soma
# positions
# rotation_args : dict, default rotations around x and y axis applied to
# each cell in the population using LFPy.NetworkCell.set_rotation()
# method.
# syn_section : list
# list of section names where outgoing connections from this population
# are made onto postsynaptic neurons (i.e., no excitatory synapses on
# somatic sections anywhere)
# extrinsic_input_density : density of extrinisc incoming connections in
# units of [µm^-2]
# extrinsic_input_frequency : frequency of synapse activation in units of [Hz]
# TODO: Define only short names, pick random cell types or similar when
# creating populations. Column could be redone as
# [('m_type', '|U8'), ('e-type', '|U8')] and
# single cell objects picked from the glob('m+e type') on random
# Override population sizes (for testing)
if TESTING:
PSET.populationParameters['POP_SIZE'] = np.ones(
PSET.populationParameters.size)
# Define a layer-specificity of connections L_YXL
# (see Hagen, Dahmen et al. (2016), Cereb Cortex) based on the anatomy of
# dendrites and axons. We here define this depth-dependence of synapse
# positioning as the product of total [soma + dendrite] length and
# total axon length in spatial bins corresponding to the thickness and
# boundaries of each layer. The products are normalized such that the sum of
# each column is 1, i.e., the sum of layer specificities of a connection
# between X and Y is 1.
PSET.L_YXL_m_types = {}
bins = np.r_[-PSET.layer_data['thickness'].cumsum()[::-1], 0]
for i, (y, Y, pop_args_Y, rotation_args_Y) in enumerate(zip(
PSET.populationParameters['m_type'],
PSET.populationParameters['me_type'],
PSET.populationParameters['pop_args'],
PSET.populationParameters['rotation_args'])):
# create a container for the layer specificities of connections
data = np.zeros((PSET.layer_data.size,
PSET.populationParameters.size))
# find and load the corresponding morphology files into LFPy
m_Y = glob(os.path.join(PSET.CELLPATH, Y, 'morphology', '*.asc'))[0]
cell_Y = LFPy.Cell(morphology=m_Y)
cell_Y.set_rotation(**rotation_args_Y)
cell_Y.set_pos(z=pop_args_Y['loc'])
# sum the total length of axon in each layer bin
layerbounds = np.r_[0, -PSET.layer_data['thickness'].cumsum()]
len_Y_sum = np.zeros(PSET.layer_data.size)
for k in range(PSET.layer_data.size):
len_Y_sum[k] = cell_Y.length[cell_Y.get_idx(
['soma', 'dend', 'apic'],
z_min=layerbounds[k + 1],
z_max=layerbounds[k])].sum()
cell_Y.__del__() # clean up section refs
for j, (X, pop_args_X, rotation_args_X) in enumerate(zip(
PSET.populationParameters['me_type'],
PSET.populationParameters['pop_args'],
PSET.populationParameters['rotation_args'])):
m_X = glob(os.path.join(PSET.CELLPATH, X, 'morphology', '*.asc'))[0]
cell_X = LFPy.Cell(morphology=m_X)
cell_X.set_rotation(**rotation_args_X)
cell_X.set_pos(z=pop_args_X['loc'])
len_X_sum = np.zeros(PSET.layer_data.size)
for k in range(PSET.layer_data.size):
len_X_sum[k] = cell_X.length[cell_X.get_idx(
'axon', z_min=layerbounds[k + 1], z_max=layerbounds[k])].sum()
data[:, j] = np.sqrt(len_Y_sum * len_X_sum) / \
np.sqrt(len_Y_sum * len_X_sum).sum()
cell_X.__del__() # clean up section refs
# fill in
PSET.L_YXL_m_types[y] = data
# clean up namespace
del cell_X, cell_Y, len_X_sum, len_Y_sum, data
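# Illustrative sanity check (added note, not part of the original script): by
# construction each column of every L_YXL matrix sums to one, e.g.
#   assert np.allclose(PSET.L_YXL_m_types['L5_TTPC1'].sum(axis=0), 1.)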
# Container for LFPy.NetworkCell class parameters (path to morphology file
# etc.)
PSET.cellParameters = dict()
##########################################################################
# Set up various files and folders such that single-cell models from BBP can
# be used, and extract some numbers from pathway .json files
##########################################################################
# TODO: Add automated download of cell models from EPFL microcircuit portal
# autodownload some json files with anatomical and pathway specific data
pathway_files = ['pathways_anatomy_factsheets_simplified.json',
'pathways_physiology_factsheets_simplified.json']
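# Note (added for clarity): only RANK 0 downloads the factsheet files; the other
# MPI processes wait at the barrier below until the files exist on disk.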
if RANK == 0:
for fname in pathway_files:
if not os.path.isfile(fname):
u = urlopen(
'https://bbp.epfl.ch/nmc-portal/documents/10184/7288948/' +
fname)
localFile = open(fname, 'w')
localFile.write(u.read().decode('utf-8'))
localFile.close()
u.close()
COMM.Barrier()
# flag for cell template file to switch on (inactive) synapses
add_synapses = False
# load synapse file info for each cell type as structured arrays in dictionary
synapses_tsv_dtype = [
('synapse_id', int),
('pre_cell_id', int),
('pre_mtype', int),
('sectionlist_id', int),
('sectionlist_index', int),
('seg_x', float),
('synapse_type', int),
('dep', float),
('fac', float),
('use', float),
('tau_d', float),
('delay', float),
('weight', float)
]
synapses_tsv = {}
# attempt to set up a folder with all unique EPFL mechanism mod files,
# compile, and load them all in order to be able to load cells as
# LFPy.NetworkCell objects
if RANK == 0:
if not os.path.isdir(PSET.NMODL):
os.mkdir(PSET.NMODL)
for NRN in PSET.populationParameters['me_type']:
for nmodl in glob(os.path.join(
PSET.CELLPATH, NRN, 'mechanisms', '*.mod')):
while not os.path.isfile(
os.path.join(PSET.NMODL, os.path.split(nmodl)[-1])):
os.system(f"cp {nmodl} {os.path.join(PSET.NMODL, '.')}")
os.chdir(PSET.NMODL)
# patch faulty ProbGABAAB_EMS.mod file (otherwise stochastic inhibitory
# synapses will stay closed except at first activation)
diff = '''319c319
< urand = scop_random(1)
---
> value = scop_random(1)
'''
f = open('ProbGABAAB_EMS.patch', 'w')
f.writelines(diff)
f.close()
os.system('patch ProbGABAAB_EMS.mod ProbGABAAB_EMS.patch')
os.system('nrnivmodl')
os.chdir(PSET.CWD)
# load mechanisms
neuron.load_mechanisms(PSET.NMODL)
# Fill in dictionary of population-specific cell parameters
for NRN in PSET.populationParameters['me_type']:
os.chdir(os.path.join(PSET.CWD, PSET.CELLPATH, NRN))
# get the template name
f = open("template.hoc", 'r')
templatename = get_templatename(f)
f.close()
# get biophys template name
f = open("biophysics.hoc", 'r')
biophysics = get_templatename(f)
f.close()
# get morphology template name
f = open("morphology.hoc", 'r')
morphology = get_templatename(f)
f.close()
# get synapses template name
f = open(os.path.join("synapses", "synapses.hoc"), 'r')
synapses = get_templatename(f)
f.close()
if not hasattr(neuron.h, morphology):
"""Create the cell model"""
# Load morphology
neuron.h.load_file(1, "morphology.hoc")
if not hasattr(neuron.h, biophysics):
# Load biophysics
neuron.h.load_file(1, "biophysics.hoc")
if not hasattr(neuron.h, synapses):
# load synapses
neuron.h.load_file(1, os.path.join('synapses', 'synapses.hoc'))
if not hasattr(neuron.h, templatename):
# Load main cell template
neuron.h.load_file(1, "template.hoc")
# create parameter dictionaries specific for each cell type (population)
PSET.cellParameters[NRN] = dict(list(dict(
morphology=glob(os.path.join('morphology', '*'))[0],
templatefile=os.path.join(NRN, 'template.hoc'),
templatename=templatename,
templateargs=1 if add_synapses else 0,
).items()) + list(cellParams.items()))
# load synapse and connectivity data. mtype_map is the same for all cell types
if sys.version < '3':
with open(os.path.join('synapses', 'mtype_map.tsv')) as f:
mtype_map = np.loadtxt(f,
dtype={'names': ('pre_mtype_id', 'pre_mtype'),
'formats': ('i4', f'{stringType}9')},
converters={1: lambda s: s.decode()})
else:
with open(os.path.join('synapses', 'mtype_map.tsv'),
encoding='us-ascii') as f:
mtype_map = np.loadtxt(f,
dtype={'names': ('pre_mtype_id', 'pre_mtype'),
'formats': ('i4', f'{stringType}9')},
converters={1: lambda s: s.decode()})
os.chdir(PSET.CWD)
for name in PSET.populationParameters['m_type']:
files = glob(
os.path.join(
PSET.CELLPATH,
name + '*',
'synapses',
'synapses.tsv'))
synapses_tsv[name] = np.array([], dtype=synapses_tsv_dtype)
for f in files:
synapses_tsv[name] = np.r_[
synapses_tsv[name], np.loadtxt(
f, dtype=synapses_tsv_dtype, skiprows=1)]
# Open pathway anatomy and physiology factsheet files and read out info
pathways_anatomy = dict()
pathways_physiology = dict()
f = open(pathway_files[0], 'r')
j = json.load(f)
for pre in PSET.populationParameters['m_type']:
for post in PSET.populationParameters['m_type']:
key = f'{pre}:{post}'
try:
pathways_anatomy[key] = j[key]
except KeyError:
# fill in dummy data, no synapses will be created
print(f'no pathway anatomy data for connection {key}')
if sys.version < '3':
pathways_anatomy[key] = {
'common_neighbor_bias': 0,
'connection_probability': 0,
'mean_number_of_synapse_per_connection': 0,
'number_of_convergent_neuron_mean': 0,
'number_of_convergent_neuron_std': 0,
'number_of_divergent_neuron_mean': 0,
'number_of_divergent_neuron_std': 0,
'number_of_synapse_per_connection_std': 0,
'total_synapse_count': 0,
}
else:
pathways_anatomy[key] = {
u'common_neighbor_bias': 0,
u'connection_probability': 0,
u'mean_number_of_synapse_per_connection': 0,
u'number_of_convergent_neuron_mean': 0,
u'number_of_convergent_neuron_std': 0,
u'number_of_divergent_neuron_mean': 0,
u'number_of_divergent_neuron_std': 0,
u'number_of_synapse_per_connection_std': 0,
u'total_synapse_count': 0,
}
f.close()
j.clear()
f = open(pathway_files[1], 'r')
j = json.load(f)
for pre in PSET.populationParameters['m_type']:
for post in PSET.populationParameters['m_type']:
key = f'{pre}:{post}'
try:
pathways_physiology[key] = j[key]
except KeyError:
# fill in dummy data, no synapses will be created
print(f'no pathway physiology data for connection {key}')
if sys.version < '3':
pathways_physiology[key] = {
'cv_psp_amplitude_mean': 3,
'cv_psp_amplitude_std': 0.95,
'd_mean': 360,
'd_std': 230,
'decay_mean': 9.8,
'decay_std': 6.7,
'epsp_mean': 1.6,
'epsp_std': 0.78,
'f_mean': 330,
'f_std': 240,
'failures_mean': 86,
'failures_std': 6.5,
'gsyn_mean': 0.3,
'gsyn_std': 0.11,
'latency_mean': 0.33,
'latency_std': 0.18,
'risetime_mean': 0.43,
'risetime_std': 0.47,
'space_clamp_correction_factor': 3.6,
'synapse_type': u'Excitatory, depressing',
'u_mean': 0.19,
'u_std': 0.23
}
else:
pathways_physiology[key] = {
u'cv_psp_amplitude_mean': 3,
u'cv_psp_amplitude_std': 0.95,
u'd_mean': 360,
u'd_std': 230,
u'decay_mean': 9.8,
u'decay_std': 6.7,
u'epsp_mean': 1.6,
u'epsp_std': 0.78,
u'f_mean': 330,
u'f_std': 240,
u'failures_mean': 86,
u'failures_std': 6.5,
u'gsyn_mean': 0.3,
u'gsyn_std': 0.11,
u'latency_mean': 0.33,
u'latency_std': 0.18,
u'risetime_mean': 0.43,
u'risetime_std': 0.47,
u'space_clamp_correction_factor': 3.6,
u'synapse_type': u'Excitatory, depressing',
u'u_mean': 0.19,
u'u_std': 0.23
}
f.close()
j.clear()
# get out stats for synapses and connections, temporary
syn_param_stats = get_syn_params(PSET.populationParameters['m_type'],
PSET.populationParameters['me_type'],
pathways_physiology, mtype_map, synapses_tsv)
del synapses_tsv # not needed anymore.
###########################################################################
# Set up main connection parameters used by Network class instance methods
############################################################################
# Main connection parameters between pre and post-synaptic populations
# organized as dictionary of parameter lists between pre and postsynaptic
# populations:
if PSET.fully_connected:
# fully connected network (no selfconnections)
connprob = [[1] * PSET.populationParameters.size] * \
PSET.populationParameters.size
else:
connprob = get_params(PSET.populationParameters['m_type'],
pathways_anatomy,
'connection_probability',
# unit conversion % -> fraction
0.01 * PSET.CONNPROBSCALING)
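# Note (added for clarity): connprob is a nested list of shape
# (n_populations x n_populations); it is presumably indexed as connprob[pre][post],
# matching the ordering of PSET.populationParameters['m_type'] used for the
# syntypes/synparams entries constructed below.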
PSET.connParams = dict(
# connection probabilities between populations
connprob=connprob,
# synapse mechanisms
syntypes=[[neuron.h.ProbAMPANMDA_EMS
if syn_param_stats[f'{pre}:{post}'
]['synapse_type'] >= 100 else
neuron.h.ProbGABAAB_EMS
for post in PSET.populationParameters['m_type']]
for pre in PSET.populationParameters['m_type']],
# synapse time constants and reversal potentials.
# Use the mean/global EPFL synapse model parameters
# (for now) as some connections appear to be missing in pathway files.
synparams=[[dict(
Use=syn_param_stats[f'{pre}:{post}']['Use_mean'],
Dep=syn_param_stats[f'{pre}:{post}']['Dep_mean'],
Fac=syn_param_stats[f'{pre}:{post}']['Fac_mean'],
tau_r_AMPA=0.2,
tau_d_AMPA=syn_param_stats[f'{pre}:{post}']['tau_d_mean'],
tau_r_NMDA=0.29,
tau_d_NMDA=43,
e=0,
mg=1,
u0=0,
synapseID=0,
verboseLevel=0,
NMDA_ratio=0.4 # this may take on several values in synconf.txt files,
# not accounted for here
)
if syn_param_stats[f'{pre}:{post}'
]['synapse_type'] >= 100 else
dict(
Use=syn_param_stats[f'{pre}:{post}']['Use_mean'],
Dep=syn_param_stats[f'{pre}:{post}']['Dep_mean'],
Fac=syn_param_stats[f'{pre}:{post}']['Fac_mean'],
tau_r_GABAA=0.2,
# from synapses.hoc: rng.lognormal(0.2, 0.1) (mean, variance)
tau_d_GABAA=syn_param_stats[f'{pre}:{post}']['tau_d_mean'],
tau_r_GABAB=3.5,
tau_d_GABAB=260.9,
e_GABAA=-80,
e_GABAB=-75.8354,
u0=0,
synapseID=0,
verboseLevel=0,
GABAB_ratio=0.0,
# this may take on several values, in synconf.txt files, not accounted
# for here
)
for post in PSET.populationParameters['m_type']]
for pre in PSET.populationParameters['m_type']],
# maximum conductances
weightfuns=[[np.random.normal] * PSET.populationParameters.size] * \
PSET.populationParameters.size,
weightargs=get_params(PSET.populationParameters['m_type'],
pathways_physiology,
['gsyn_mean', 'gsyn_std'], 1.),
# Correct??? (very small PSPs otherwise).
# Also, weights in unknown units loaded from synapses_tsv is different
# than the reported averaged gsyn.
# connection delays
delayfuns=[[stats.truncnorm] * PSET.populationParameters.size] * \
PSET.populationParameters.size,
delayargs=[[dict(
a=(2**-3 - syn_param_stats[f'{pre}:{post}']['delay_mean']) /
syn_param_stats[f'{pre}:{post}']['delay_std'],
b=np.inf,
loc=syn_param_stats[f'{pre}:{post}']['delay_mean'],
scale=syn_param_stats[f'{pre}:{post}']['delay_std']
) for post in PSET.populationParameters['m_type']]
for pre in PSET.populationParameters['m_type']],
# min delays now set by delayargs[['a']], this param will be deprecated
mindelay=None,
# numbers of synapses per connection
multapsefuns=[[stats.truncnorm] \
* PSET.populationParameters.size] \
* PSET.populationParameters.size,
multapseargs=get_clipped_params(PSET.populationParameters['m_type'],
pathways_anatomy,
['mean_number_of_synapse_per_connection',
'number_of_synapse_per_connection_std'],
myclip_a=1, myclip_b=20),
# parameters for finding random synapse locations using the method
# LFPy.Cell.get_rand_idx_area_and_distribution_norm. The argument nidx is
# default to 1
syn_pos_args=[[dict(section=syn_section,
z_min=-1E6,
z_max=1E6,
fun=[stats.norm] * PSET.layer_data.size,
funargs=[dict(loc=loc, scale=scale / 2.)
for loc, scale in PSET.layer_data[
['center', 'thickness']]],
funweights=PSET.L_YXL_m_types[post_m_type][:, i]
) for i, pre_m_type in enumerate(
PSET.populationParameters['m_type'])]
for post_m_type, syn_section in PSET.populationParameters[
['m_type', 'syn_section']]],
)
# save connection data
PSET.save_connections = False
# connection parameters for synapses activated by putative external
# population(s)
PSET.connParamsExtrinsic = dict(
# synapse type
syntype='ProbAMPANMDA_EMS',
# synapse parameters (assumes parameters of excitatory population in the
# layer)
synparams=[dict(
Use=syn_param_stats[f'{get_pre_m_type(post)}:{post}']['Use_mean'],
Dep=syn_param_stats[f'{get_pre_m_type(post)}:{post}']['Dep_mean'],
Fac=syn_param_stats[f'{get_pre_m_type(post)}:{post}']['Fac_mean'],
tau_r_AMPA=0.2,
tau_d_AMPA=syn_param_stats[f'{get_pre_m_type(post)}:{post}'
]['tau_d_mean'],
tau_r_NMDA=0.29,
tau_d_NMDA=43,
e=0,
mg=1,
u0=0,
synapseID=0,
verboseLevel=0,
NMDA_ratio=0.4 # this may take on several values in synconf.txt files,
# not accounted for here
) for post in PSET.populationParameters['m_type']],
# maximum conductances
weightfuns=[np.random.normal] * PSET.populationParameters.size,
weightargs=[get_params(np.array([m_type]), pathways_physiology,
['gsyn_mean', 'gsyn_std'], 1.)[0][0]
for m_type in PSET.populationParameters['m_type']],
) | PypiClean |
/DeepPhysX.Sofa-22.12.1.tar.gz/DeepPhysX.Sofa-22.12.1/examples/demos/Liver/FC/dataset.py | import os
import sys
# DeepPhysX related imports
from DeepPhysX.Core.Pipelines.BaseDataGeneration import BaseDataGeneration
from DeepPhysX.Core.Database.BaseDatabaseConfig import BaseDatabaseConfig
from DeepPhysX.Sofa.Environment.SofaEnvironmentConfig import SofaEnvironmentConfig
# Session related imports
from download import LiverDownloader
LiverDownloader().get_session('run')
from Environment.LiverTraining import LiverTraining
# Dataset parameters
nb_batches = {'training': 500, 'validation': 50}
batch_size = {'training': 32, 'validation': 10}
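# Typical usage (see the __main__ block below): run `python dataset.py` to produce
# the training dataset, or `python dataset.py -v` to produce the validation dataset.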
def launch_data_generation(dataset_dir, dataset_mode):
# Environment configuration
environment_config = SofaEnvironmentConfig(environment_class=LiverTraining,
visualizer='vedo',
as_tcp_ip_client=True,
number_of_thread=4)
# Dataset configuration
database_config = BaseDatabaseConfig(existing_dir=dataset_dir,
max_file_size=1,
mode=dataset_mode,
normalize=True)
# Create DataGenerator
data_generator = BaseDataGeneration(environment_config=environment_config,
database_config=database_config,
session_dir='sessions',
session_name='liver_data_user',
batch_nb=nb_batches[dataset_mode],
batch_size=batch_size[dataset_mode])
# Launch the data generation session
data_generator.execute()
if __name__ == '__main__':
# Define dataset
user_session = 'sessions/liver_data_user'
dataset = user_session if os.path.exists(user_session) else None
# Get dataset mode
mode = 'training'
if len(sys.argv) > 1:
if sys.argv[1] != '-v':
            print("Script option must be '-v' to produce validation dataset. "
                  "By default, training dataset is produced.")
quit(0)
mode = 'validation'
# Launch pipeline
launch_data_generation(dataset, mode) | PypiClean |
/EARL-pytorch-0.5.1.tar.gz/EARL-pytorch-0.5.1/rlgym/utils/state_setters/random_state.py | from rlgym.utils.state_setters import StateSetter
from rlgym.utils.state_setters import StateWrapper
from rlgym.utils.math import rand_vec3
import numpy as np
from numpy import random as rand
X_MAX = 7000
Y_MAX = 9000
Z_MAX_BALL = 1850
Z_MAX_CAR = 1900
PITCH_MAX = np.pi/2
YAW_MAX = np.pi
ROLL_MAX = np.pi
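# Illustrative usage (added note; the surrounding framework calls are assumptions):
#   state_setter = RandomState(ball_rand_speed=True, cars_rand_speed=True,
#                              cars_on_ground=False)
#   state_setter.reset(state_wrapper)  # state_wrapper: an rlgym StateWrapper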
class RandomState(StateSetter):
def __init__(self, ball_rand_speed: bool = False, cars_rand_speed: bool = False, cars_on_ground: bool = True):
"""
RandomState constructor.
:param ball_rand_speed: Boolean indicating whether the ball will have a randomly set velocity.
:param cars_rand_speed: Boolean indicating whether cars will have a randomly set velocity.
:param cars_on_ground: Boolean indicating whether cars should only be placed on the ground.
"""
super().__init__()
self.ball_rand_speed = ball_rand_speed
self.cars_rand_speed = cars_rand_speed
self.cars_on_ground = cars_on_ground
def reset(self, state_wrapper: StateWrapper):
"""
        Modifies the StateWrapper to contain random values for the ball and each car.
:param state_wrapper: StateWrapper object to be modified with desired state values.
"""
self._reset_ball_random(state_wrapper, self.ball_rand_speed)
self._reset_cars_random(state_wrapper, self.cars_on_ground, self.cars_rand_speed)
def _reset_ball_random(self, state_wrapper: StateWrapper, random_speed: bool):
"""
Function to set the ball to a random position.
:param state_wrapper: StateWrapper object to be modified.
:param random_speed: Boolean indicating whether to randomize velocity values.
"""
state_wrapper.ball.set_pos(rand.random(
) * X_MAX - X_MAX/2, rand.random() * Y_MAX - Y_MAX/2, rand.random() * Z_MAX_BALL + 100)
if random_speed:
state_wrapper.ball.set_lin_vel(*rand_vec3(3000))
state_wrapper.ball.set_ang_vel(*rand_vec3(6))
def _reset_cars_random(self, state_wrapper: StateWrapper, on_ground: bool, random_speed: bool):
"""
Function to set all cars to a random position.
:param state_wrapper: StateWrapper object to be modified.
:param on_ground: Boolean indicating whether to place cars only on the ground.
:param random_speed: Boolean indicating whether to randomize velocity values.
"""
for car in state_wrapper.cars:
# set random position and rotation for all cars based on pre-determined ranges
car.set_pos(rand.random() * X_MAX - X_MAX/2, rand.random()
* Y_MAX - Y_MAX/2, rand.random() * Z_MAX_CAR + 150)
car.set_rot(rand.random() * PITCH_MAX - PITCH_MAX/2, rand.random()
* YAW_MAX - YAW_MAX/2, rand.random() * ROLL_MAX - ROLL_MAX/2)
car.boost = rand.random()
if random_speed:
# set random linear and angular velocity based on pre-determined ranges
car.set_lin_vel(*rand_vec3(2300))
car.set_ang_vel(*rand_vec3(5.5))
# 100% of cars will be set on ground if on_ground == True
# otherwise, 50% of cars will be set on ground
if on_ground or rand.random() < 0.5:
# z position (up/down) is set to ground
car.set_pos(z=17)
# z linear velocity (vertical) set to 0
car.set_lin_vel(z=0)
# pitch (front of car up/down) set to 0
# roll (side of car up/down) set to 0
car.set_rot(pitch=0, roll=0)
# x angular velocity (affects pitch) set to 0
                # y angular velocity (affects roll) set to 0
car.set_ang_vel(x=0, y=0) | PypiClean |
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/nass_cds.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def nass_cds(path):
"""Airbag and other influences on accident fatalities
US data, for 1997-2002, from police-reported car crashes in which there
is a harmful event (people or property), and from which at least one
vehicle was towed. Data are restricted to front-seat occupants, include
only a subset of the variables recorded, and are restricted in other
ways also.
A data frame with 26217 observations on the following 15 variables.
`dvcat`
ordered factor with levels (estimated impact speeds) `1-9km/h`,
`10-24`, `25-39`, `40-54`, `55+`
`weight`
Observation weights, albeit of uncertain accuracy, designed to
account for varying sampling probabilities.
`dead`
factor with levels `alive` `dead`
`airbag`
a factor with levels `none` `airbag`
`seatbelt`
a factor with levels `none` `belted`
`frontal`
a numeric vector; 0 = non-frontal, 1=frontal impact
`sex`
a factor with levels `f` `m`
`ageOFocc`
age of occupant in years
`yearacc`
year of accident
`yearVeh`
Year of model of vehicle; a numeric vector
`abcat`
Did one or more (driver or passenger) airbag(s) deploy? This factor
has levels `deploy` `nodeploy` `unavail`
`occRole`
a factor with levels `driver` `pass`
`deploy`
a numeric vector: 0 if an airbag was unavailable or did not deploy;
1 if one or more bags deployed.
`injSeverity`
a numeric vector; 0:none, 1:possible injury, 2:no incapacity,
3:incapacity, 4:killed; 5:unknown, 6:prior death
`caseid`
character, created by pasting together the populations sampling
unit, the case number, and the vehicle number. Within each year, use
this to uniquely identify the vehicle.
  http://www.stat.colostate.edu/~meyer/airbags.htm
  ftp://ftp.nhtsa.dot.gov/nass/
  See also http://www.maths.anu.edu.au/~johnm/datasets/airbags
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `nass_cds.csv`.
Returns:
Tuple of np.ndarray `x_train` with 26217 rows and 15 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
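  # Example usage (illustrative; the path below is a placeholder):
  #   x_train, metadata = nass_cds('~/observations_data')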
path = os.path.expanduser(path)
filename = 'nass_cds.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/nassCDS.csv'
maybe_download_and_extract(path, url,
save_file_name='nass_cds.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
/Oasys-Canvas-Core-1.0.7.tar.gz/Oasys-Canvas-Core-1.0.7/orangecanvas/application/schemeinfo.py | import six
from PyQt5.QtWidgets import (
QWidget, QDialog, QLabel, QTextEdit, QCheckBox, QFormLayout,
QVBoxLayout, QHBoxLayout, QDialogButtonBox, QSizePolicy
)
from PyQt5.QtCore import Qt
from ..gui.lineedit import LineEdit
from ..gui.utils import StyledWidget_paintEvent, StyledWidget
class SchemeInfoEdit(QWidget):
"""Scheme info editor widget.
"""
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.scheme = None
self.__setupUi()
def __setupUi(self):
layout = QFormLayout()
layout.setRowWrapPolicy(QFormLayout.WrapAllRows)
layout.setFieldGrowthPolicy(QFormLayout.ExpandingFieldsGrow)
self.name_edit = LineEdit(self)
self.name_edit.setPlaceholderText(self.tr("untitled"))
self.name_edit.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Fixed)
self.desc_edit = QTextEdit(self)
self.desc_edit.setTabChangesFocus(True)
layout.addRow(self.tr("Title"), self.name_edit)
layout.addRow(self.tr("Description"), self.desc_edit)
self.__schemeIsUntitled = True
self.setLayout(layout)
def setScheme(self, scheme):
"""Set the scheme to display/edit
"""
self.scheme = scheme
if not scheme.title:
self.name_edit.setText(self.tr("untitled"))
self.name_edit.selectAll()
self.__schemeIsUntitled = True
else:
self.name_edit.setText(scheme.title)
self.__schemeIsUntitled = False
self.desc_edit.setPlainText(scheme.description or "")
def commit(self):
"""Commit the current contents of the editor widgets
back to the scheme.
"""
if self.__schemeIsUntitled and \
self.name_edit.text() == self.tr("untitled"):
# 'untitled' text was not changed
name = ""
else:
name = six.text_type(self.name_edit.text()).strip()
description = six.text_type(self.desc_edit.toPlainText()).strip()
self.scheme.title = name
self.scheme.description = description
def paintEvent(self, event):
return StyledWidget_paintEvent(self, event)
def title(self):
return six.text_type(self.name_edit.text()).strip()
def description(self):
return six.text_type(self.desc_edit.toPlainText()).strip()
class SchemeInfoDialog(QDialog):
def __init__(self, *args, **kwargs):
QDialog.__init__(self, *args, **kwargs)
self.scheme = None
self.__autoCommit = True
self.__setupUi()
def __setupUi(self):
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.editor = SchemeInfoEdit(self)
self.editor.layout().setContentsMargins(20, 20, 20, 20)
self.editor.layout().setSpacing(15)
self.editor.setSizePolicy(QSizePolicy.MinimumExpanding,
QSizePolicy.MinimumExpanding)
heading = self.tr("Workflow Info")
heading = "<h3>{0}</h3>".format(heading)
self.heading = QLabel(heading, self, objectName="heading")
# Insert heading
self.editor.layout().insertRow(0, self.heading)
self.buttonbox = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal,
self
)
# Insert button box
self.editor.layout().addRow(self.buttonbox)
'''
widget = StyledWidget(self, objectName="auto-show-container")
check_layout = QHBoxLayout()
check_layout.setContentsMargins(20, 10, 20, 10)
self.__showAtNewSchemeCheck = \
QCheckBox(self.tr("Show when I make a New Workflow."),
self,
objectName="auto-show-check",
checked=False,
)
check_layout.addWidget(self.__showAtNewSchemeCheck)
check_layout.addWidget(
QLabel(self.tr("You can also edit Workflow Info later "
"(File -> Workflow Info)."),
self,
objectName="auto-show-info"),
alignment=Qt.AlignRight)
widget.setLayout(check_layout)
widget.setSizePolicy(QSizePolicy.MinimumExpanding,
QSizePolicy.Fixed)
'''
if self.__autoCommit:
self.buttonbox.accepted.connect(self.editor.commit)
self.buttonbox.accepted.connect(self.accept)
self.buttonbox.rejected.connect(self.reject)
layout.addWidget(self.editor, stretch=10)
#layout.addWidget(widget)
self.setLayout(layout)
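    # Note (added for clarity): the two methods below refer to
    # self.__showAtNewSchemeCheck, which is only created in the block commented
    # out in __setupUi above; calling them while that block is disabled would
    # raise an AttributeError.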
def setShowAtNewScheme(self, checked):
"""
Set the 'Show at new scheme' check state.
"""
self.__showAtNewSchemeCheck.setChecked(checked)
def showAtNewScheme(self):
"""
Return the check state of the 'Show at new scheme' check box.
"""
return self.__showAtNewSchemeCheck.isChecked()
def setAutoCommit(self, auto):
if self.__autoCommit != auto:
self.__autoCommit = auto
if auto:
self.buttonbox.accepted.connect(self.editor.commit)
else:
self.buttonbox.accepted.disconnect(self.editor.commit)
def setScheme(self, scheme):
"""Set the scheme to display/edit.
"""
self.scheme = scheme
self.editor.setScheme(scheme) | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/calculate_savings_request_py3.py |
from msrest.serialization import Model
class CalculateSavingsRequest(Model):
"""CalculateSavingsRequest.
:param bill_selection_option: Determines which option will be used to
select the bills for this instance of the cost avoidance processor to
process
- "AllBills" means all bills will be processed
- "ModifiedOrNotProcessed" means only new bills or bills which have been
modified since the last time cost avoidance was run will be processed
- "BillingPeriod" means that only bills with a billing period which is
greater than or equal to the provided billing period will be processed
<span class='property-internal'>One of AllBills, ModifiedOrNotProcessed,
BillingPeriod </span>
:type bill_selection_option: str
:param billing_period: When BillSelectionOption is 'BillingPeriod', this
defines the billing period to be used to target bills for processing <span
class='property-internal'>Required when BillSelectionOption is set to
BillingPeriod</span>
:type billing_period: int
:param skip_special_adjustment_option: Allow the user to optionally
exclude one or all special adjustments <span class='property-internal'>One
of SkipAll, SkipOne, SkipNone </span>
:type skip_special_adjustment_option: str
:param special_adjustment_type_id_to_ignore: During cost avoidance
processing, the special adjustment having the provided special adjustment
type ID will not be applied (only if SkipSpecialAdjustmentOption =
"SkipOne") <span class='property-internal'>Required when
SkipSpecialAdjustmentOption is set to SkipOne</span>
:type special_adjustment_type_id_to_ignore: int
:param filters: Filter criteria which determine the meters whose savings
will be reprocessed
:type filters: list[~energycap.sdk.models.FilterEdit]
"""
_attribute_map = {
'bill_selection_option': {'key': 'billSelectionOption', 'type': 'str'},
'billing_period': {'key': 'billingPeriod', 'type': 'int'},
'skip_special_adjustment_option': {'key': 'skipSpecialAdjustmentOption', 'type': 'str'},
'special_adjustment_type_id_to_ignore': {'key': 'specialAdjustmentTypeIdToIgnore', 'type': 'int'},
'filters': {'key': 'filters', 'type': '[FilterEdit]'},
}
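    # Illustrative construction (added note; the field values are placeholders):
    #   request = CalculateSavingsRequest(bill_selection_option='BillingPeriod',
    #                                     billing_period=202001)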
def __init__(self, *, bill_selection_option: str=None, billing_period: int=None, skip_special_adjustment_option: str=None, special_adjustment_type_id_to_ignore: int=None, filters=None, **kwargs) -> None:
super(CalculateSavingsRequest, self).__init__(**kwargs)
self.bill_selection_option = bill_selection_option
self.billing_period = billing_period
self.skip_special_adjustment_option = skip_special_adjustment_option
self.special_adjustment_type_id_to_ignore = special_adjustment_type_id_to_ignore
self.filters = filters | PypiClean |
/ESMValTool_sample_data-0.0.3-py3-none-any.whl/esmvaltool_sample_data/__init__.py | from pathlib import Path
import cf_units
import iris
base_dir = Path(__file__).parent
VERBOSE = False
# This ignore list is used to help with debugging only
# for a more permanent solution, add
# problematic datasets the `ignore` section in `../datasets.yml`.
ignore_list = [
# 'CMIP6/CMIP/AWI/AWI-ESM-1-1-LR/historical/r1i1p1f1/Amon/ta/gn/v20200212',
]
def strip_attributes(cube: 'iris.Cube') -> None:
"""Remove attributes in-place that cause issues with merging and
concatenation."""
for attr in ['creation_date', 'tracking_id', 'history']:
if attr in cube.attributes:
cube.attributes.pop(attr)
def simplify_time(cube: 'iris.Cube') -> None:
"""Simplifies the time coordinate in-place."""
coord = cube.coord('time')
coord.convert_units(
cf_units.Unit('days since 1850-1-1 00:00:00',
calendar=coord.units.calendar))
def load_cubes_from_input_dirs(input_dirs: list) -> 'iris.Cube':
"""Generator that loads all *.nc files from each input dir into a cube."""
for i, input_dir in enumerate(sorted(input_dirs)):
if VERBOSE:
print(f'Loading #{i:02d}:', input_dir)
files = input_dir.glob('*.nc')
cubes = iris.load(str(file) for file in files)
for cube in cubes:
strip_attributes(cube)
simplify_time(cube)
cube = cubes.concatenate_cube()
if VERBOSE:
print(' ', cube.shape, cube.coord('time').units.calendar)
yield cube
def filter_ignored_datasets(dirs, root):
"""Filter datasets defined in the global `ignore` list."""
for drc in dirs:
test_drc = str(drc.relative_to(root))
if test_drc not in ignore_list:
yield drc
elif VERBOSE:
print('Ignored:', test_drc)
def load_timeseries_cubes(mip_table: str = 'Amon') -> list:
"""Returns a list of iris cubes with timeseries data.
The data are: ta / Amon / historical / r1i1p1f1, any grid, 1950 - onwards.
All dimensions were reduced to a few steps except for the time dimension.
Parameters
----------
mip_table: str
select monthly (`Amon`, default) or daily (`day`) data.
Returns
-------
cubes: list
list of iris.cube.Cube
"""
timeseries_dir = base_dir / 'data' / 'timeseries'
paths = timeseries_dir.glob(f'**/{mip_table}/**/*.nc')
input_dirs = list(set(path.parent for path in paths))
input_dirs = list(filter_ignored_datasets(input_dirs, timeseries_dir))
cubes = load_cubes_from_input_dirs(input_dirs)
return list(cubes)
def get_rootpaths() -> dict:
"""Return a dict with rootpaths to update the user config in ESMValTool."""
rootpath = {
'rootpath': {
'CMIP6': [
str(base_dir / 'data' / 'timeseries' / 'CMIP6'),
]
},
'drs': {
'CMIP6': 'default',
},
}
return rootpath
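# Illustrative use (added note, an assumption about the caller): the returned
# mapping can be merged into an ESMValTool user configuration, e.g.
#   config.update(get_rootpaths())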
if __name__ == '__main__':
VERBOSE = True
for mip_table in (
'Amon',
'day',
):
print()
print(f'Loading `{mip_table}`')
ts = load_timeseries_cubes(mip_table)
first_cube = ts[0]
for i, cube in enumerate(ts):
print(i)
cube.regrid(grid=first_cube, scheme=iris.analysis.Linear())
# breakpoint() | PypiClean |
/EnergySystemModels-0.1.17.post63-py3-none-any.whl/NodeEditor/nodeeditor/node_graphics_view.py | from PyQt5.QtWidgets import QGraphicsView, QApplication
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from NodeEditor.nodeeditor.node_graphics_socket import QDMGraphicsSocket
from NodeEditor.nodeeditor.node_graphics_edge import QDMGraphicsEdge
from NodeEditor.nodeeditor.node_edge_dragging import EdgeDragging
from NodeEditor.nodeeditor.node_graphics_cutline import QDMCutLine
from NodeEditor.nodeeditor.utils import dumpException
MODE_NOOP = 1 #: Mode representing ready state
MODE_EDGE_DRAG = 2 #: Mode representing when we drag edge state
MODE_EDGE_CUT = 3 #: Mode representing when we draw a cutting edge
#: Distance when click on socket to enable `Drag Edge`
EDGE_DRAG_START_THRESHOLD = 50
DEBUG = False
DEBUG_MMB_SCENE_ITEMS = False
DEBUG_MMB_LAST_SELECTIONS = False
class QDMGraphicsView(QGraphicsView):
"""Class representing NodeEditor's `Graphics View`"""
#: pyqtSignal emitted when cursor position on the `Scene` has changed
scenePosChanged = pyqtSignal(int, int)
def __init__(self, grScene:'QDMGraphicsScene', parent:'QWidget'=None):
"""
:param grScene: reference to the :class:`~nodeeditor.node_graphics_scene.QDMGraphicsScene`
:type grScene: :class:`~nodeeditor.node_graphics_scene.QDMGraphicsScene`
:param parent: parent widget
:type parent: ``QWidget``
:Instance Attributes:
- **grScene** - reference to the :class:`~nodeeditor.node_graphics_scene.QDMGraphicsScene`
- **mode** - state of the `Graphics View`
        - **zoomInFactor** - ``float`` - zoom step scaling, default 1.25
- **zoomClamp** - ``bool`` - do we clamp zooming or is it infinite?
- **zoom** - current zoom step
- **zoomStep** - ``int`` - the relative zoom step when zooming in/out
- **zoomRange** - ``[min, max]``
"""
super().__init__(parent)
self.grScene = grScene
self.initUI()
self.setScene(self.grScene)
self.mode = MODE_NOOP
self.editingFlag = False
self.rubberBandDraggingRectangle = False
# edge dragging
self.dragging = EdgeDragging(self)
# cutline
self.cutline = QDMCutLine()
self.grScene.addItem(self.cutline)
self.last_scene_mouse_position = QPoint(0,0)
self.zoomInFactor = 1.25
self.zoomClamp = True
self.zoom = 10
self.zoomStep = 1
self.zoomRange = [0, 10]
# listeners
self._drag_enter_listeners = []
self._drop_listeners = []
def initUI(self):
"""Set up this ``QGraphicsView``"""
self.setRenderHints(QPainter.Antialiasing | QPainter.HighQualityAntialiasing | QPainter.TextAntialiasing | QPainter.SmoothPixmapTransform)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setDragMode(QGraphicsView.RubberBandDrag)
# enable dropping
self.setAcceptDrops(True)
def resetMode(self):
"""Helper function to re-set the grView's State Machine state to the default"""
self.mode = MODE_NOOP
def dragEnterEvent(self, event:QDragEnterEvent):
"""Trigger our registered `Drag Enter` events"""
for callback in self._drag_enter_listeners: callback(event)
def dropEvent(self, event:QDropEvent):
"""Trigger our registered `Drop` events"""
for callback in self._drop_listeners: callback(event)
def addDragEnterListener(self, callback:'function'):
"""
Register callback for `Drag Enter` event
:param callback: callback function
"""
self._drag_enter_listeners.append(callback)
def addDropListener(self, callback:'function'):
"""
Register callback for `Drop` event
:param callback: callback function
"""
self._drop_listeners.append(callback)
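    # Example (illustrative; handler names are placeholders): the owning editor
    # widget typically registers its handlers like
    #   view.addDragEnterListener(self.onDragEnter)
    #   view.addDropListener(self.onDrop)
    # where each callback receives the corresponding QDragEnterEvent / QDropEvent.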
def mousePressEvent(self, event:QMouseEvent):
"""Dispatch Qt's mousePress event to corresponding function below"""
if event.button() == Qt.MiddleButton:
self.middleMouseButtonPress(event)
elif event.button() == Qt.LeftButton:
self.leftMouseButtonPress(event)
elif event.button() == Qt.RightButton:
self.rightMouseButtonPress(event)
else:
super().mousePressEvent(event)
def mouseReleaseEvent(self, event:QMouseEvent):
"""Dispatch Qt's mouseRelease event to corresponding function below"""
if event.button() == Qt.MiddleButton:
self.middleMouseButtonRelease(event)
elif event.button() == Qt.LeftButton:
self.leftMouseButtonRelease(event)
elif event.button() == Qt.RightButton:
self.rightMouseButtonRelease(event)
else:
super().mouseReleaseEvent(event)
def middleMouseButtonPress(self, event:QMouseEvent):
"""When Middle mouse button was pressed"""
item = self.getItemAtClick(event)
# debug print out
if DEBUG_MMB_SCENE_ITEMS:
if isinstance(item, QDMGraphicsEdge):
print("MMB DEBUG:", item.edge, "\n\t", item.edge.grEdge if item.edge.grEdge is not None else None)
return
if isinstance(item, QDMGraphicsSocket):
print("MMB DEBUG:", item.socket, "socket_type:", item.socket.socket_type,
"has edges:", "no" if item.socket.edges == [] else "")
if item.socket.edges:
for edge in item.socket.edges: print("\t", edge)
return
if DEBUG_MMB_SCENE_ITEMS and (item is None):
print("SCENE:")
print(" Nodes:")
for node in self.grScene.scene.nodes: print("\t", node)
print(" Edges:")
for edge in self.grScene.scene.edges: print("\t", edge, "\n\t\tgrEdge:", edge.grEdge if edge.grEdge is not None else None)
if event.modifiers() & Qt.CTRL:
print(" Graphic Items in GraphicScene:")
for item in self.grScene.items():
print(' ', item)
if DEBUG_MMB_LAST_SELECTIONS and event.modifiers() & Qt.SHIFT:
print("scene _last_selected_items:", self.grScene.scene._last_selected_items)
return
# faking events for enable MMB dragging the scene
releaseEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton, event.modifiers())
super().mouseReleaseEvent(releaseEvent)
self.setDragMode(QGraphicsView.ScrollHandDrag)
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton, event.modifiers())
super().mousePressEvent(fakeEvent)
def middleMouseButtonRelease(self, event:QMouseEvent):
"""When Middle mouse button was released"""
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() & ~Qt.LeftButton, event.modifiers())
super().mouseReleaseEvent(fakeEvent)
self.setDragMode(QGraphicsView.RubberBandDrag)
def leftMouseButtonPress(self, event:QMouseEvent):
"""When Left mouse button was pressed"""
# get item which we clicked on
item = self.getItemAtClick(event)
# we store the position of last LMB click
self.last_lmb_click_scene_pos = self.mapToScene(event.pos())
# if DEBUG: print("LMB Click on", item, self.debug_modifiers(event))
# logic
if hasattr(item, "node") or isinstance(item, QDMGraphicsEdge) or item is None:
if event.modifiers() & Qt.ShiftModifier:
event.ignore()
fakeEvent = QMouseEvent(QEvent.MouseButtonPress, event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton,
event.modifiers() | Qt.ControlModifier)
super().mousePressEvent(fakeEvent)
return
if isinstance(item, QDMGraphicsSocket):
if self.mode == MODE_NOOP:
self.mode = MODE_EDGE_DRAG
self.dragging.edgeDragStart(item)
return
if self.mode == MODE_EDGE_DRAG:
res = self.dragging.edgeDragEnd(item)
if res: return
if item is None:
if event.modifiers() & Qt.ControlModifier:
self.mode = MODE_EDGE_CUT
fakeEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton, event.modifiers())
super().mouseReleaseEvent(fakeEvent)
QApplication.setOverrideCursor(Qt.CrossCursor)
return
else:
self.rubberBandDraggingRectangle = True
super().mousePressEvent(event)
def leftMouseButtonRelease(self, event:QMouseEvent):
"""When Left mouse button was released"""
# get item which we release mouse button on
item = self.getItemAtClick(event)
try:
# logic
if hasattr(item, "node") or isinstance(item, QDMGraphicsEdge) or item is None:
if event.modifiers() & Qt.ShiftModifier:
event.ignore()
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton,
event.modifiers() | Qt.ControlModifier)
super().mouseReleaseEvent(fakeEvent)
return
if self.mode == MODE_EDGE_DRAG:
if self.distanceBetweenClickAndReleaseIsOff(event):
res = self.dragging.edgeDragEnd(item)
if res: return
if self.mode == MODE_EDGE_CUT:
self.cutIntersectingEdges()
self.cutline.line_points = []
self.cutline.update()
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.mode = MODE_NOOP
return
if self.rubberBandDraggingRectangle:
self.rubberBandDraggingRectangle = False
current_selected_items = self.grScene.selectedItems()
if current_selected_items != self.grScene.scene._last_selected_items:
if current_selected_items == []:
self.grScene.itemsDeselected.emit()
else:
self.grScene.itemSelected.emit()
self.grScene.scene._last_selected_items = current_selected_items
return
# otherwise deselect everything
if item is None:
self.grScene.itemsDeselected.emit()
except: dumpException()
super().mouseReleaseEvent(event)
def rightMouseButtonPress(self, event:QMouseEvent):
"""When Right mouse button was pressed"""
super().mousePressEvent(event)
    def rightMouseButtonRelease(self, event:QMouseEvent):
        """When Right mouse button was released"""
## cannot be because with dragging RMB we spawn Create New Node Context Menu
## However, you could use this if you want to cancel with RMB
# if self.mode == MODE_EDGE_DRAG:
# self.dragging.edgeDragEnd(None)
# return
super().mouseReleaseEvent(event)
    def mouseMoveEvent(self, event:QMouseEvent):
        """Overridden Qt's ``mouseMoveEvent`` handling Scene/View logic"""
scenepos = self.mapToScene(event.pos())
if self.mode == MODE_EDGE_DRAG:
self.dragging.updateDestination(scenepos.x(), scenepos.y())
if self.mode == MODE_EDGE_CUT and self.cutline is not None:
self.cutline.line_points.append(scenepos)
self.cutline.update()
self.last_scene_mouse_position = scenepos
self.scenePosChanged.emit( int(scenepos.x()), int(scenepos.y()) )
super().mouseMoveEvent(event)
def keyPressEvent(self, event:QKeyEvent):
"""
.. note::
            This overridden Qt method was used for handling key shortcuts before we implemented a proper
            ``QWindow`` with Actions and Menu. The commented code still serves as an example of how to handle
            key presses without Qt's framework for Actions and shortcuts. It also shows how to solve the
            problem when a Node contains a Text/LineEdit and we press the `Delete`
            key (which also serves to delete the `Node`)
:param event: Qt's Key event
:type event: ``QKeyEvent``
:return:
"""
# Use this code below if you wanna have shortcuts in this widget.
# You want to use this, when you don't have a window which handles these shortcuts for you
# if event.key() == Qt.Key_Delete:
# if not self.editingFlag:
# self.deleteSelected()
# else:
# super().keyPressEvent(event)
# elif event.key() == Qt.Key_S and event.modifiers() & Qt.ControlModifier:
# self.grScene.scene.saveToFile("graph.json")
# elif event.key() == Qt.Key_L and event.modifiers() & Qt.ControlModifier:
# self.grScene.scene.loadFromFile("graph.json")
# elif event.key() == Qt.Key_Z and event.modifiers() & Qt.ControlModifier and not event.modifiers() & Qt.ShiftModifier:
# self.grScene.scene.history.undo()
# elif event.key() == Qt.Key_Z and event.modifiers() & Qt.ControlModifier and event.modifiers() & Qt.ShiftModifier:
# self.grScene.scene.history.redo()
# elif event.key() == Qt.Key_H:
# print("HISTORY: len(%d)" % len(self.grScene.scene.history.history_stack),
# " -- current_step", self.grScene.scene.history.history_current_step)
# ix = 0
# for item in self.grScene.scene.history.history_stack:
# print("#", ix, "--", item['desc'])
# ix += 1
# else:
super().keyPressEvent(event)
def cutIntersectingEdges(self):
"""Compare which `Edges` intersect with current `Cut line` and delete them safely"""
for ix in range(len(self.cutline.line_points) - 1):
p1 = self.cutline.line_points[ix]
p2 = self.cutline.line_points[ix + 1]
            # @TODO: we could collect all touched nodes and notify them once after all edges are removed.
            # If we cut 3 edges leading to a single node, this will notify it 3x;
# maybe we could use some Notifier class with methods collect() and dispatch()
for edge in self.grScene.scene.edges.copy():
if edge.grEdge.intersectsWith(p1, p2):
edge.remove()
self.grScene.scene.history.storeHistory("Delete cutted edges", setModified=True)
def deleteSelected(self):
"""Shortcut for safe deleting every object selected in the `Scene`."""
for item in self.grScene.selectedItems():
if isinstance(item, QDMGraphicsEdge):
item.edge.remove()
elif hasattr(item, 'node'):
item.node.remove()
self.grScene.scene.history.storeHistory("Delete selected", setModified=True)
    def debug_modifiers(self, event):
        """Helper function returning a string listing which of the Ctrl, Shift and Alt modifier keys are held"""
out = "MODS: "
if event.modifiers() & Qt.ShiftModifier: out += "SHIFT "
if event.modifiers() & Qt.ControlModifier: out += "CTRL "
if event.modifiers() & Qt.AltModifier: out += "ALT "
return out
    def getItemAtClick(self, event:QEvent) -> 'QGraphicsItem':
        """Return the object on which we've clicked/released the mouse button
:param event: Qt's mouse or key event
:type event: ``QEvent``
:return: ``QGraphicsItem`` which the mouse event happened or ``None``
"""
pos = event.pos()
obj = self.itemAt(pos)
return obj
def distanceBetweenClickAndReleaseIsOff(self, event:QMouseEvent) -> bool:
""" Measures if we are too far from the last Mouse button click scene position.
        This is used to detect whether we released the mouse too far from where we clicked on a `Socket`
:param event: Qt's mouse event
:type event: ``QMouseEvent``
:return: ``True`` if we released too far from where we clicked before
"""
new_lmb_release_scene_pos = self.mapToScene(event.pos())
dist_scene = new_lmb_release_scene_pos - self.last_lmb_click_scene_pos
edge_drag_threshold_sq = EDGE_DRAG_START_THRESHOLD*EDGE_DRAG_START_THRESHOLD
return (dist_scene.x()*dist_scene.x() + dist_scene.y()*dist_scene.y()) > edge_drag_threshold_sq
    def wheelEvent(self, event:QWheelEvent):
        """Overridden Qt's ``wheelEvent``. This handles zooming"""
# calculate our zoom Factor
zoomOutFactor = 1 / self.zoomInFactor
# calculate zoom
if event.angleDelta().y() > 0:
zoomFactor = self.zoomInFactor
self.zoom += self.zoomStep
else:
zoomFactor = zoomOutFactor
self.zoom -= self.zoomStep
clamped = False
if self.zoom < self.zoomRange[0]: self.zoom, clamped = self.zoomRange[0], True
if self.zoom > self.zoomRange[1]: self.zoom, clamped = self.zoomRange[1], True
# set scene scale
if not clamped or self.zoomClamp is False:
self.scale(zoomFactor, zoomFactor) | PypiClean |
/GraphQL_core_next-1.1.1-py3-none-any.whl/graphql/language/print_location.py | import re
from typing import Optional, Tuple, cast
from .ast import Location
from .location import SourceLocation, get_location
from .source import Source
__all__ = ["print_location", "print_source_location"]
def print_location(location: Location) -> str:
"""Render a helpful description of the location in the GraphQL Source document."""
return print_source_location(
location.source, get_location(location.source, location.start)
)
_re_newline = re.compile(r"\r\n|[\n\r]")
def print_source_location(source: Source, source_location: SourceLocation) -> str:
"""Render a helpful description of the location in the GraphQL Source document."""
first_line_column_offset = source.location_offset.column - 1
body = " " * first_line_column_offset + source.body
line_index = source_location.line - 1
line_offset = source.location_offset.line - 1
line_num = source_location.line + line_offset
column_offset = first_line_column_offset if source_location.line == 1 else 0
column_num = source_location.column + column_offset
location_str = f"{source.name}:{line_num}:{column_num}\n"
    lines = _re_newline.split(body)  # works a bit differently from splitlines()
location_line = lines[line_index]
# Special case for minified documents
if len(location_line) > 120:
subline_index, subline_column_num = divmod(column_num, 80)
sublines = [location_line[i : i + 80] for i in range(0, len(location_line), 80)]
return location_str + print_prefixed_lines(
(str(line_num), sublines[0]),
*[("", subline) for subline in sublines[1 : subline_index + 1]],
(" ", " " * (subline_column_num - 1) + "^"),
(
"",
sublines[subline_index + 1]
if subline_index < len(sublines) - 1
else None,
),
)
return location_str + print_prefixed_lines(
(f"{line_num - 1}", lines[line_index - 1] if line_index > 0 else None),
(f"{line_num}", location_line),
("", " " * (column_num - 1) + "^"),
(
f"{line_num + 1}",
lines[line_index + 1] if line_index < len(lines) - 1 else None,
),
)
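# Illustrative output (added note; whitespace is approximate, assuming the default
# source name "GraphQL request") for an error at line 2, column 3 of a short document:
#   GraphQL request:2:3
#   1 | {
#   2 |   field
#     |   ^
#   3 | }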
def print_prefixed_lines(*lines: Tuple[str, Optional[str]]) -> str:
"""Print lines specified like this: ("prefix", "string")"""
existing_lines = [
cast(Tuple[str, str], line) for line in lines if line[1] is not None
]
pad_len = max(len(line[0]) for line in existing_lines)
return "\n".join(
map(lambda line: line[0].rjust(pad_len) + " | " + line[1], existing_lines)
) | PypiClean |
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/transcoder.py |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
""" transcoder classes to be used in combination with
a Coherence MediaServer
    using GStreamer pipelines for the actual work
    and feeding the output into an HTTP response
"""
import pygst
pygst.require('0.10')
import gst
import gobject
gobject.threads_init()
import os.path
import urllib
from twisted.web import resource, server
from twisted.internet import protocol
from coherence import log
import struct
def get_transcoder_name(transcoder):
return transcoder.name
class InternalTranscoder(object):
""" just a class to inherit from and
which we can look for upon creating our
list of available transcoders
"""
class FakeTransformer(gst.Element, log.Loggable):
logCategory = 'faker_datasink'
_sinkpadtemplate = gst.PadTemplate("sinkpadtemplate",
gst.PAD_SINK,
gst.PAD_ALWAYS,
gst.caps_new_any())
_srcpadtemplate = gst.PadTemplate("srcpadtemplate",
gst.PAD_SRC,
gst.PAD_ALWAYS,
gst.caps_new_any())
def __init__(self, destination=None, request=None):
gst.Element.__init__(self)
log.Loggable.__init__(self)
self.sinkpad = gst.Pad(self._sinkpadtemplate, "sink")
self.srcpad = gst.Pad(self._srcpadtemplate, "src")
self.add_pad(self.sinkpad)
self.add_pad(self.srcpad)
self.sinkpad.set_chain_function(self.chainfunc)
self.buffer = ''
self.buffer_size = 0
self.proxy = False
self.got_new_segment = False
self.closed = False
def get_fake_header(self):
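        # Added note: this returns a minimal 32-byte MP4 'ftyp' box -- a
        # big-endian uint32 length (32) and the fourcc 'ftyp', followed by major
        # brand 'mp42', minor version 0 and compatible brands mp42/mp41/isom/iso2.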
return struct.pack(">L4s", 32, 'ftyp') + \
"mp42\x00\x00\x00\x00mp42mp41isomiso2"
def chainfunc(self, pad, buffer):
if self.proxy:
# we are in proxy mode already
self.srcpad.push(buffer)
return gst.FLOW_OK
self.buffer = self.buffer + buffer.data
if not self.buffer_size:
try:
self.buffer_size, a_type = struct.unpack(">L4s", self.buffer[:8])
except:
return gst.FLOW_OK
if len(self.buffer) < self.buffer_size:
# we need to buffer more
return gst.FLOW_OK
buffer = self.buffer[self.buffer_size:]
fake_header = self.get_fake_header()
n_buf = gst.Buffer(fake_header + buffer)
self.proxy = True
self.srcpad.push(n_buf)
return gst.FLOW_OK
gobject.type_register(FakeTransformer)
class DataSink(gst.Element, log.Loggable):
logCategory = 'transcoder_datasink'
_sinkpadtemplate = gst.PadTemplate("sinkpadtemplate",
gst.PAD_SINK,
gst.PAD_ALWAYS,
gst.caps_new_any())
def __init__(self, destination=None, request=None):
gst.Element.__init__(self)
log.Loggable.__init__(self)
self.sinkpad = gst.Pad(self._sinkpadtemplate, "sink")
self.add_pad(self.sinkpad)
self.sinkpad.set_chain_function(self.chainfunc)
self.sinkpad.set_event_function(self.eventfunc)
self.destination = destination
self.request = request
if self.destination is not None:
self.destination = open(self.destination, 'wb')
self.buffer = ''
self.data_size = 0
self.got_new_segment = False
self.closed = False
def chainfunc(self, pad, buffer):
if self.closed:
return gst.FLOW_OK
if self.destination is not None:
self.destination.write(buffer.data)
elif self.request is not None:
self.buffer += buffer.data
if len(self.buffer) > 200000:
self.request.write(self.buffer)
self.buffer = ''
else:
self.buffer += buffer.data
self.data_size += buffer.size
return gst.FLOW_OK
def eventfunc(self, pad, event):
if event.type == gst.EVENT_NEWSEGMENT:
if not self.got_new_segment:
self.got_new_segment = True
else:
self.closed = True
elif event.type == gst.EVENT_EOS:
if self.destination is not None:
self.destination.close()
elif self.request is not None:
if len(self.buffer) > 0:
self.request.write(self.buffer)
self.request.finish()
return True
gobject.type_register(DataSink)
class GStreamerPipeline(resource.Resource, log.Loggable):
logCategory = 'gstreamer'
addSlash = True
def __init__(self, pipeline, content_type):
self.pipeline_description = pipeline
self.contentType = content_type
self.requests = []
# if stream has a streamheader (something that has to be prepended
# before any data), then it will be a tuple of GstBuffers
self.streamheader = None
self.parse_pipeline()
resource.Resource.__init__(self)
log.Loggable.__init__(self)
def parse_pipeline(self):
self.pipeline = gst.parse_launch(self.pipeline_description)
self.appsink = gst.element_factory_make("appsink", "sink")
self.appsink.set_property('emit-signals', True)
self.pipeline.add(self.appsink)
enc = self.pipeline.get_by_name("enc")
enc.link(self.appsink)
self.appsink.connect("new-preroll", self.new_preroll)
self.appsink.connect("new-buffer", self.new_buffer)
self.appsink.connect("eos", self.eos)
def start(self, request=None):
self.info("GStreamerPipeline start %r %r", request,
self.pipeline_description)
self.requests.append(request)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished, request)
def new_preroll(self, appsink):
self.debug("new preroll")
buffer = appsink.emit('pull-preroll')
if not self.streamheader:
# check caps for streamheader buffer
caps = buffer.get_caps()
s = caps[0]
if s.has_key("streamheader"):
self.streamheader = s["streamheader"]
self.debug("setting streamheader")
for r in self.requests:
self.debug("writing streamheader")
for h in self.streamheader:
r.write(h.data)
for r in self.requests:
self.debug("writing preroll")
r.write(buffer.data)
def new_buffer(self, appsink):
buffer = appsink.emit('pull-buffer')
if not self.streamheader:
# check caps for streamheader buffers
caps = buffer.get_caps()
s = caps[0]
if s.has_key("streamheader"):
self.streamheader = s["streamheader"]
self.debug("setting streamheader")
for r in self.requests:
self.debug("writing streamheader")
for h in self.streamheader:
r.write(h.data)
for r in self.requests:
r.write(buffer.data)
def eos(self, appsink):
self.info("eos")
for r in self.requests:
r.finish()
self.cleanup()
def getChild(self, name, request):
self.info('getChild %s, %s', name, request)
return self
def render_GET(self, request):
self.info('render GET %r', request)
request.setResponseCode(200)
if hasattr(self, 'contentType'):
request.setHeader('Content-Type', self.contentType)
request.write('')
headers = request.getAllHeaders()
if('connection' in headers and
headers['connection'] == 'close'):
pass
if self.requests:
if self.streamheader:
self.debug("writing streamheader")
for h in self.streamheader:
request.write(h.data)
self.requests.append(request)
else:
self.parse_pipeline()
self.start(request)
return server.NOT_DONE_YET
def render_HEAD(self, request):
self.info('render HEAD %r', request)
request.setResponseCode(200)
request.setHeader('Content-Type', self.contentType)
request.write('')
def requestFinished(self, result, request):
self.info("requestFinished %r", result)
""" we need to find a way to destroy the pipeline here
"""
#from twisted.internet import reactor
#reactor.callLater(0, self.pipeline.set_state, gst.STATE_NULL)
self.requests.remove(request)
if not self.requests:
self.cleanup()
def on_message(self, bus, message):
t = message.type
print "on_message", t
if t == gst.MESSAGE_ERROR:
#err, debug = message.parse_error()
#print "Error: %s" % err, debug
self.cleanup()
elif t == gst.MESSAGE_EOS:
self.cleanup()
def cleanup(self):
self.info("pipeline cleanup")
self.pipeline.set_state(gst.STATE_NULL)
self.requests = []
self.streamheader = None
class BaseTranscoder(resource.Resource, log.Loggable):
logCategory = 'transcoder'
addSlash = True
def __init__(self, uri, destination=None):
self.info('uri %s %r', uri, type(uri))
if uri[:7] not in ['file://', 'http://']:
uri = 'file://' + urllib.quote(uri) # FIXME
self.uri = uri
self.destination = destination
resource.Resource.__init__(self)
log.Loggable.__init__(self)
def getChild(self, name, request):
self.info('getChild %s, %s', name, request)
return self
def render_GET(self, request):
self.info('render GET %r', request)
request.setResponseCode(200)
if hasattr(self, 'contentType'):
request.setHeader('Content-Type', self.contentType)
request.write('')
headers = request.getAllHeaders()
if('connection' in headers and
headers['connection'] == 'close'):
pass
self.start(request)
return server.NOT_DONE_YET
def render_HEAD(self, request):
self.info('render HEAD %r', request)
request.setResponseCode(200)
request.setHeader('Content-Type', self.contentType)
request.write('')
def requestFinished(self, result):
self.info("requestFinished %r", result)
""" we need to find a way to destroy the pipeline here
"""
#from twisted.internet import reactor
#reactor.callLater(0, self.pipeline.set_state, gst.STATE_NULL)
gobject.idle_add(self.cleanup)
def on_message(self, bus, message):
t = message.type
print "on_message", t
if t == gst.MESSAGE_ERROR:
#err, debug = message.parse_error()
#print "Error: %s" % err, debug
self.cleanup()
elif t == gst.MESSAGE_EOS:
self.cleanup()
def cleanup(self):
self.pipeline.set_state(gst.STATE_NULL)
class PCMTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/L16;rate=44100;channels=2'
name = 'lpcm'
def start(self, request=None):
self.info("PCMTranscoder start %r %r", request, self.uri)
self.pipeline = gst.parse_launch(
"%s ! decodebin ! audioconvert name=conv" % self.uri)
conv = self.pipeline.get_by_name('conv')
caps = gst.Caps("audio/x-raw-int,rate=44100,endianness=4321,channels=2,width=16,depth=16,signed=true")
#FIXME: UGLY. 'filter' is a python builtin!
filter = gst.element_factory_make("capsfilter", "filter")
filter.set_property("caps", caps)
self.pipeline.add(filter)
conv.link(filter)
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
filter.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class WAVTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/x-wav'
name = 'wav'
def start(self, request=None):
self.info("start %r", request)
self.pipeline = gst.parse_launch(
"%s ! decodebin ! audioconvert ! wavenc name=enc" % self.uri)
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
#bus = self.pipeline.get_bus()
#bus.connect('message', self.on_message)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP3Transcoder(BaseTranscoder, InternalTranscoder):
contentType = 'audio/mpeg'
name = 'mp3'
def start(self, request=None):
self.info("start %r", request)
self.pipeline = gst.parse_launch(
"%s ! decodebin ! audioconvert ! lame name=enc" % self.uri)
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP4Transcoder(BaseTranscoder, InternalTranscoder):
""" Only works if H264 inside Quicktime/MP4 container is input
Source has to be a valid uri
"""
contentType = 'video/mp4'
name = 'mp4'
def start(self, request=None):
self.info("start %r", request)
self.pipeline = gst.parse_launch(
"%s ! qtdemux name=d ! queue ! h264parse ! mp4mux name=mux d. ! queue ! mux." % self.uri)
mux = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
mux.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class MP2TSTranscoder(BaseTranscoder, InternalTranscoder):
contentType = 'video/mpeg'
name = 'mpegts'
def start(self, request=None):
self.info("start %r", request)
### FIXME mpeg2enc
self.pipeline = gst.parse_launch(
"mpegtsmux name=mux %s ! decodebin2 name=d ! queue ! ffmpegcolorspace ! mpeg2enc ! queue ! mux. d. ! queue ! audioconvert ! twolame ! queue ! mux." % self.uri)
enc = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class ThumbTranscoder(BaseTranscoder, InternalTranscoder):
""" should create a valid thumbnail according to the DLNA spec
neither width nor height must exceed 160px
"""
contentType = 'image/jpeg'
name = 'thumb'
def start(self, request=None):
self.info("start %r", request)
""" what we actually want here is a pipeline that calls
us when it knows about the size of the original image,
and allows us now to adjust the caps-filter with the
calculated values for width and height
new_width = 160
new_height = 160
if original_width > 160:
new_heigth = int(float(original_height) * (160.0/float(original_width)))
if new_height > 160:
new_width = int(float(new_width) * (160.0/float(new_height)))
elif original_height > 160:
new_width = int(float(original_width) * (160.0/float(original_height)))
"""
try:
type = request.args['type'][0]
except:
type = 'jpeg'
if type == 'png':
self.pipeline = gst.parse_launch(
"%s ! decodebin2 ! videoscale ! video/x-raw-yuv,width=160,height=160 ! pngenc name=enc" % self.uri)
self.contentType = 'image/png'
else:
self.pipeline = gst.parse_launch(
"%s ! decodebin2 ! videoscale ! video/x-raw-yuv,width=160,height=160 ! jpegenc name=enc" % self.uri)
self.contentType = 'image/jpeg'
enc = self.pipeline.get_by_name('enc')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class GStreamerTranscoder(BaseTranscoder):
""" a generic Transcode based on GStreamer
the pipeline which will be parsed upon
calling the start method, as to be set as
the attribute pipeline_description to the
instantiated class
same for the attribute contentType
"""
def start(self, request=None):
self.info("start %r", request)
self.pipeline = gst.parse_launch(self.pipeline_description % self.uri)
enc = self.pipeline.get_by_name('mux')
sink = DataSink(destination=self.destination, request=request)
self.pipeline.add(sink)
enc.link(sink)
self.pipeline.set_state(gst.STATE_PLAYING)
d = request.notifyFinish()
d.addBoth(self.requestFinished)
class ExternalProcessProtocol(protocol.ProcessProtocol):
def __init__(self, caller):
self.caller = caller
def connectionMade(self):
print "pp connection made"
def outReceived(self, data):
#print "outReceived with %d bytes!" % len(data)
self.caller.write_data(data)
def errReceived(self, data):
#print "errReceived! with %d bytes!" % len(data)
print "pp (err):", data.strip()
def inConnectionLost(self):
#print "inConnectionLost! stdin is closed! (we probably did it)"
pass
def outConnectionLost(self):
#print "outConnectionLost! The child closed their stdout!"
pass
def errConnectionLost(self):
#print "errConnectionLost! The child closed their stderr."
pass
def processEnded(self, status_object):
print "processEnded, status %d" % status_object.value.exitCode
print "processEnded quitting"
self.caller.ended = True
self.caller.write_data('')
class ExternalProcessProducer(object):
logCategory = 'externalprocess'
def __init__(self, pipeline, request):
self.pipeline = pipeline
self.request = request
self.process = None
self.written = 0
self.data = ''
self.ended = False
request.registerProducer(self, 0)
def write_data(self, data):
if data:
#print "write %d bytes of data" % len(data)
self.written += len(data)
# this .write will spin the reactor, calling .doWrite and then
# .resumeProducing again, so be prepared for a re-entrant call
self.request.write(data)
if self.request and self.ended:
print "closing"
self.request.unregisterProducer()
self.request.finish()
self.request = None
def resumeProducing(self):
#print "resumeProducing", self.request
if not self.request:
return
if self.process is None:
argv = self.pipeline.split()
executable = argv[0]
argv[0] = os.path.basename(argv[0])
from twisted.internet import reactor
self.process = reactor.spawnProcess(ExternalProcessProtocol(self),
executable, argv, {})
def pauseProducing(self):
pass
def stopProducing(self):
print "stopProducing", self.request
self.request.unregisterProducer()
self.process.loseConnection()
self.request.finish()
self.request = None
class ExternalProcessPipeline(resource.Resource, log.Loggable):
logCategory = 'externalprocess'
addSlash = False
def __init__(self, uri):
self.uri = uri
resource.Resource.__init__(self)
log.Loggable.__init__(self)
def getChildWithDefault(self, path, request):
return self
def render(self, request):
print "ExternalProcessPipeline render"
try:
if self.contentType:
request.setHeader('Content-Type', self.contentType)
except AttributeError:
pass
ExternalProcessProducer(self.pipeline_description % self.uri, request)
return server.NOT_DONE_YET
def transcoder_class_wrapper(klass, content_type, pipeline):
def create_object(uri):
transcoder = klass(uri)
transcoder.contentType = content_type
transcoder.pipeline_description = pipeline
return transcoder
return create_object
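# Usage sketch (not part of the original module): wrap a transcoder class with a
# fixed content type and pipeline description. The launch line below is a made-up
# example; any pipeline with a '%s' placeholder for the source uri and an element
# named 'mux' to attach the sink to would work the same way.
#
#     factory = transcoder_class_wrapper(GStreamerTranscoder,
#                                        'video/mpeg',
#                                        '%s ! decodebin2 ! ffmpegcolorspace ! mpeg2enc ! mpegtsmux name=mux')
#     transcoder = factory('file:///tmp/example.avi')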
class TranscoderManager(log.Loggable):
""" singleton class which holds information
about all available transcoders
they are put into a transcoders dict with
their id as the key
we collect all internal transcoders by searching
for all subclasses of InternalTranscoder, the class
will be the value
transcoders defined in the config are parsed and
stored as a dict in the transcoders dict
in the config a transcoder description has to look like this:
*** preliminary, will be extended and might even change without further notice ***
<transcoder>
<pipeline>%s ...</pipeline> <!-- we need a %s here to insert the source uri
(or can we have all the times pipelines we can prepend
with a '%s !')
and an element named mux where we can attach
our sink -->
<type>gstreamer</type> <!-- could be gstreamer or process -->
<name>mpegts</name>
<target>video/mpeg</target>
<fourth_field> <!-- value for the 4th field of the protocolInfo phalanx,
default is '*' -->
</transcoder>
"""
logCategory = 'transcoder_manager'
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
""" creates the singleton """
if cls._instance_ is None:
obj = super(TranscoderManager, cls).__new__(cls, *args, **kwargs)
cls._instance_ = obj
return cls._instance_
def __init__(self, coherence=None):
""" initializes the class
it should be called at least once
with the main coherence class passed as an argument,
so we have access to the config
"""
log.Loggable.__init__(self)
self.transcoders = {}
for transcoder in InternalTranscoder.__subclasses__():
self.transcoders[get_transcoder_name(transcoder)] = transcoder
if coherence is not None:
self.coherence = coherence
try:
transcoders_from_config = self.coherence.config['transcoder']
if isinstance(transcoders_from_config, dict):
transcoders_from_config = [transcoders_from_config]
except KeyError:
transcoders_from_config = []
for transcoder in transcoders_from_config:
# FIXME: is anyone checking if all keys are given ?
pipeline = transcoder['pipeline']
if not '%s' in pipeline:
self.warning("Can't create transcoder %r:"
" missing placehoder '%%s' in 'pipeline'",
transcoder)
continue
try:
transcoder_name = transcoder['name'].decode('ascii')
except UnicodeEncodeError:
self.warning("Can't create transcoder %r:"
" the 'name' contains non-ascii letters",
transcoder)
continue
transcoder_type = transcoder['type'].lower()
if transcoder_type == 'gstreamer':
wrapped = transcoder_class_wrapper(GStreamerTranscoder,
transcoder['target'], transcoder['pipeline'])
elif transcoder_type == 'process':
wrapped = transcoder_class_wrapper(ExternalProcessPipeline,
transcoder['target'], transcoder['pipeline'])
else:
self.warning("unknown transcoder type %r", transcoder_type)
continue
self.transcoders[transcoder_name] = wrapped
#FIXME reduce that to info later
self.warning("available transcoders %r", self.transcoders)
def select(self, name, uri, backend=None):
# FIXME:why do we specify the name when trying to get it?
if backend is not None:
""" try to find a transcoder provided by the backend
and return that here,
if there isn't one continue with the ones
provided by the config or the internal ones
"""
pass
transcoder = self.transcoders[name](uri)
return transcoder
if __name__ == '__main__':
t = Transcoder(None)
// /CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/0.6/tls.js
var tls = {};
/**
* Creates a new tls.Server.
* @param options
* @param secureConnectionListener
* @returns {tls.Server} a new tls.Server
*/
tls.createServer = function(options, secureConnectionListener) {}
/**
* Creates a new client connection to the given port and host. (If host
 * is omitted, it defaults to localhost.) options should be an object which specifies
* @param port
* @param host=localhost
* @param options
* @param secureConnectListener
* @returns {tls.CleartextStream}
*/
tls.connect = function(port, host, options, secureConnectListener) {}
/**
* This class is a subclass of net.Server and has the same methods on it.
* @constructor
*/
tls.Server = function() {}
tls.Server.prototype = new net.Server();
/**
* The number of concurrent connections on the server.
*/
tls.Server.prototype.connections = 0;
/**
* Stops the server from accepting new connections. This function is
* asynchronous, the server is finally closed when the server emits a
* 'close' event.
*/
tls.Server.prototype.close = function() {}
/**
* Set this property to reject connections when the server's connection
* count gets high.
*/
tls.Server.prototype.maxConnections = 0;
/**
* Begin accepting connections on the specified port and host. If the host
* is omitted, the server will accept connections directed to any IPv4
* address (INADDR_ANY).
* @param port
* @param host
* @param callback
*/
tls.Server.prototype.listen = function(port, host, callback) {}
/**
* Add secure context that will be used if client request's SNI hostname is
* matching passed hostname (wildcards can be used). credentials can
* contain key, cert and ca.
* @param hostname
* @param credentials
*/
tls.Server.prototype.addContext = function(hostname, credentials) {}
/**
* Returns the bound address and port of the server as reported by the
* operating system.
* @returns {Object}
*/
tls.Server.prototype.address = function() {}
/** @__local__ */ tls.Server.__events__ = {};
/**
* This event is emitted after a new connection has been successfully
 * handshaked. The argument is an instance of CleartextStream. It has all
* the common stream methods and events. cleartextStream.authorized is a
* boolean value which indicates if the client has verified by one of the
* supplied certificate authorities for the server. If
* cleartextStream.authorized is false, then
* cleartextStream.authorizationError is set to describe how authorization
* failed. Implied but worth mentioning: depending on the settings of the
 * TLS server, unauthorized connections may be accepted.
* cleartextStream.npnProtocol is a string containing selected NPN
* protocol. cleartextStream.servername is a string containing servername
* requested with SNI.
* @param cleartextStream
*/
tls.Server.__events__.secureConnection = function(cleartextStream) {};
/**
* When a client connection emits an 'error' event before secure connection
* is established - it will be forwarded here.
* @param exception
*/
tls.Server.__events__.clientError = function(exception) {};
/**
* Creates a new secure pair object with two streams, one of which
* reads/writes encrypted data, and one reads/writes cleartext data.
* @param credentials
* @param isServer
* @param requestCert
* @param rejectUnauthorized
* @returns {tls.SecurePair}
*/
tls.createSecurePair = function(credentials, isServer, requestCert, rejectUnauthorized) {}
/**
* This is a stream on top of the Encrypted stream that makes it possible
* to read/write an encrypted data as a cleartext data.
* @constructor
*/
tls.CleartextStream = function() {}
tls.CleartextStream.prototype = new stream.ReadableStream();
tls.CleartextStream.prototype = new stream.WritableStream();
/**
* Returns an object representing the peer's certificate. The returned
* object has some properties corresponding to the field of the
* certificate.
* @returns {Object}
*/
tls.CleartextStream.prototype.getPeerCertificate = function() {}
/**
* Returns the bound address and port of the underlying socket as reported
* by the operating system. Returns an object with two properties, e.g.
* @returns {Object}
*/
tls.CleartextStream.prototype.address = function() {}
/**
* A boolean that is true if the peer certificate was signed by one of the
* specified CAs, otherwise false
* @type {Boolean}
*/
tls.CleartextStream.prototype.authorized = 0;
/**
* The reason why the peer's certificate has not been verified. This
* property becomes available only when cleartextStream.authorized ===
* false.
*/
tls.CleartextStream.prototype.authorizationError = 0;
/**
* The string representation of the remote IP address. For example,
* '74.125.127.100' or '2001:4860:a005::68'.
* @type {String}
*/
tls.CleartextStream.prototype.remoteAddress = 0;
/**
* The numeric representation of the remote port. For example, 443.
*/
tls.CleartextStream.prototype.remotePort = 0;
/** @__local__ */ tls.CleartextStream.__events__ = {};
/**
* This event is emitted after a new connection has been successfully
* handshaked. The listener will be called no matter if the server's
* certificate was authorized or not. It is up to the user to test
* cleartextStream.authorized to see if the server certificate was signed
* by one of the specified CAs. If cleartextStream.authorized === false
* then the error can be found in cleartextStream.authorizationError. Also
* if NPN was used - you can check cleartextStream.npnProtocol for
* negotiated protocol.
*/
tls.CleartextStream.__events__.secureConnect = function() {};
/**
* Returned by tls.createSecurePair.
* @constructor
*/
tls.SecurePair = function() {}
tls.SecurePair.prototype = new events.EventEmitter();
/** @__local__ */ tls.SecurePair.__events__ = {};
/**
* The event is emitted from the SecurePair once the pair has successfully
* established a secure connection. Similarly to the checking for the
* server 'secureConnection' event, pair.cleartext.authorized should be
 * checked to confirm whether the certificate used was properly authorized.
*/
tls.SecurePair.__events__.secure = function() {};
var events = require('events');
var net = require('net');
var stream = require('stream');
exports = tls;
// /Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/mode/less/less.js
CodeMirror.defineMode("less", function(config) {
var indentUnit = config.indentUnit, type;
function ret(style, tp) {type = tp; return style;}
var selectors = /(^\:root$|^\:nth\-child$|^\:nth\-last\-child$|^\:nth\-of\-type$|^\:nth\-last\-of\-type$|^\:first\-child$|^\:last\-child$|^\:first\-of\-type$|^\:last\-of\-type$|^\:only\-child$|^\:only\-of\-type$|^\:empty$|^\:link|^\:visited$|^\:active$|^\:hover$|^\:focus$|^\:target$|^\:lang$|^\:enabled^\:disabled$|^\:checked$|^\:first\-line$|^\:first\-letter$|^\:before$|^\:after$|^\:not$|^\:required$|^\:invalid$)/;
function tokenBase(stream, state) {
var ch = stream.next();
if (ch == "@") {stream.eatWhile(/[\w\-]/); return ret("meta", stream.current());}
else if (ch == "/" && stream.eat("*")) {
state.tokenize = tokenCComment;
return tokenCComment(stream, state);
}
else if (ch == "<" && stream.eat("!")) {
state.tokenize = tokenSGMLComment;
return tokenSGMLComment(stream, state);
}
else if (ch == "=") ret(null, "compare");
else if (ch == "|" && stream.eat("=")) return ret(null, "compare");
else if (ch == "\"" || ch == "'") {
state.tokenize = tokenString(ch);
return state.tokenize(stream, state);
}
else if (ch == "/") { // e.g.: .png will not be parsed as a class
if(stream.eat("/")){
state.tokenize = tokenSComment;
return tokenSComment(stream, state);
}else{
if(type == "string" || type == "(")return ret("string", "string");
if(state.stack[state.stack.length-1] != undefined)return ret(null, ch);
stream.eatWhile(/[\a-zA-Z0-9\-_.\s]/);
if( /\/|\)|#/.test(stream.peek() || (stream.eatSpace() && stream.peek() == ")")) || stream.eol() )return ret("string", "string"); // let url(/images/logo.png) without quotes return as string
}
}
else if (ch == "!") {
stream.match(/^\s*\w*/);
return ret("keyword", "important");
}
else if (/\d/.test(ch)) {
stream.eatWhile(/[\w.%]/);
return ret("number", "unit");
}
else if (/[,+<>*\/]/.test(ch)) {
if(stream.peek() == "=" || type == "a")return ret("string", "string");
return ret(null, "select-op");
}
else if (/[;{}:\[\]()~\|]/.test(ch)) {
if(ch == ":"){
stream.eatWhile(/[a-z\\\-]/);
if( selectors.test(stream.current()) ){
return ret("tag", "tag");
}else if(stream.peek() == ":"){//::-webkit-search-decoration
stream.next();
stream.eatWhile(/[a-z\\\-]/);
if(stream.current().match(/\:\:\-(o|ms|moz|webkit)\-/))return ret("string", "string");
if( selectors.test(stream.current().substring(1)) )return ret("tag", "tag");
return ret(null, ch);
}else{
return ret(null, ch);
}
}else if(ch == "~"){
if(type == "r")return ret("string", "string");
}else{
return ret(null, ch);
}
}
else if (ch == ".") {
if(type == "(" || type == "string")return ret("string", "string"); // allow url(../image.png)
stream.eatWhile(/[\a-zA-Z0-9\-_]/);
if(stream.peek() == " ")stream.eatSpace();
if(stream.peek() == ")")return ret("number", "unit");//rgba(0,0,0,.25);
return ret("tag", "tag");
}
else if (ch == "#") {
//we don't eat white-space, we want the hex color and or id only
stream.eatWhile(/[A-Za-z0-9]/);
//check if there is a proper hex color length e.g. #eee || #eeeEEE
if(stream.current().length == 4 || stream.current().length == 7){
if(stream.current().match(/[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3}/,false) != null){//is there a valid hex color value present in the current stream
//when not a valid hex value, parse as id
if(stream.current().substring(1) != stream.current().match(/[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3}/,false))return ret("atom", "tag");
//eat white-space
stream.eatSpace();
//when hex value declaration doesn't end with [;,] but is does with a slash/cc comment treat it as an id, just like the other hex values that don't end with[;,]
if( /[\/<>.(){!$%^&*_\-\\?=+\|#'~`]/.test(stream.peek()) )return ret("atom", "tag");
//#time { color: #aaa }
else if(stream.peek() == "}" )return ret("number", "unit");
//we have a valid hex color value, parse as id whenever an element/class is defined after the hex(id) value e.g. #eee aaa || #eee .aaa
else if( /[a-zA-Z\\]/.test(stream.peek()) )return ret("atom", "tag");
//when a hex value is on the end of a line, parse as id
else if(stream.eol())return ret("atom", "tag");
//default
else return ret("number", "unit");
}else{//when not a valid hexvalue in the current stream e.g. #footer
stream.eatWhile(/[\w\\\-]/);
return ret("atom", "tag");
}
}else{//when not a valid hexvalue length
stream.eatWhile(/[\w\\\-]/);
return ret("atom", "tag");
}
}
else if (ch == "&") {
stream.eatWhile(/[\w\-]/);
return ret(null, ch);
}
else {
stream.eatWhile(/[\w\\\-_%.{]/);
if(type == "string"){
return ret("string", "string");
}else if(stream.current().match(/(^http$|^https$)/) != null){
stream.eatWhile(/[\w\\\-_%.{:\/]/);
return ret("string", "string");
}else if(stream.peek() == "<" || stream.peek() == ">"){
return ret("tag", "tag");
}else if( /\(/.test(stream.peek()) ){
return ret(null, ch);
}else if (stream.peek() == "/" && state.stack[state.stack.length-1] != undefined){ // url(dir/center/image.png)
return ret("string", "string");
}else if( stream.current().match(/\-\d|\-.\d/) ){ // match e.g.: -5px -0.4 etc... only colorize the minus sign
//uncomment these 2 lines if you want the minus sign to be parsed as null, e.g. -500px
//stream.backUp(stream.current().length-1);
//return ret(null, ch); //console.log( stream.current() );
return ret("number", "unit");
}else if( /\/|[\s\)]/.test(stream.peek() || stream.eol() || (stream.eatSpace() && stream.peek() == "/")) && stream.current().indexOf(".") !== -1){
if(stream.current().substring(stream.current().length-1,stream.current().length) == "{"){
stream.backUp(1);
return ret("tag", "tag");
}//end if
stream.eatSpace();
if( /[{<>.a-zA-Z\/]/.test(stream.peek()) || stream.eol() )return ret("tag", "tag"); // e.g. button.icon-plus
return ret("string", "string"); // let url(/images/logo.png) without quotes return as string
}else if( stream.eol() || stream.peek() == "[" || stream.peek() == "#" || type == "tag" ){
if(stream.current().substring(stream.current().length-1,stream.current().length) == "{")stream.backUp(1);
return ret("tag", "tag");
}else if(type == "compare" || type == "a" || type == "("){
return ret("string", "string");
}else if(type == "|" || stream.current() == "-" || type == "["){
return ret(null, ch);
}else if(stream.peek() == ":") {
stream.next();
var t_v = stream.peek() == ":" ? true : false;
if(!t_v){
var old_pos = stream.pos;
var sc = stream.current().length;
stream.eatWhile(/[a-z\\\-]/);
var new_pos = stream.pos;
if(stream.current().substring(sc-1).match(selectors) != null){
stream.backUp(new_pos-(old_pos-1));
return ret("tag", "tag");
} else stream.backUp(new_pos-(old_pos-1));
}else{
stream.backUp(1);
}
if(t_v)return ret("tag", "tag"); else return ret("variable", "variable");
}else{
return ret("variable", "variable");
}
}
}
function tokenSComment(stream, state) { // SComment = Slash comment
stream.skipToEnd();
state.tokenize = tokenBase;
return ret("comment", "comment");
}
function tokenCComment(stream, state) {
var maybeEnd = false, ch;
while ((ch = stream.next()) != null) {
if (maybeEnd && ch == "/") {
state.tokenize = tokenBase;
break;
}
maybeEnd = (ch == "*");
}
return ret("comment", "comment");
}
function tokenSGMLComment(stream, state) {
var dashes = 0, ch;
while ((ch = stream.next()) != null) {
if (dashes >= 2 && ch == ">") {
state.tokenize = tokenBase;
break;
}
dashes = (ch == "-") ? dashes + 1 : 0;
}
return ret("comment", "comment");
}
function tokenString(quote) {
return function(stream, state) {
var escaped = false, ch;
while ((ch = stream.next()) != null) {
if (ch == quote && !escaped)
break;
escaped = !escaped && ch == "\\";
}
if (!escaped) state.tokenize = tokenBase;
return ret("string", "string");
};
}
return {
startState: function(base) {
return {tokenize: tokenBase,
baseIndent: base || 0,
stack: []};
},
token: function(stream, state) {
if (stream.eatSpace()) return null;
var style = state.tokenize(stream, state);
var context = state.stack[state.stack.length-1];
if (type == "hash" && context == "rule") style = "atom";
else if (style == "variable") {
if (context == "rule") style = null; //"tag"
else if (!context || context == "@media{") {
style = stream.current() == "when" ? "variable" :
/[\s,|\s\)|\s]/.test(stream.peek()) ? "tag" : type;
}
}
if (context == "rule" && /^[\{\};]$/.test(type))
state.stack.pop();
if (type == "{") {
if (context == "@media") state.stack[state.stack.length-1] = "@media{";
else state.stack.push("{");
}
else if (type == "}") state.stack.pop();
else if (type == "@media") state.stack.push("@media");
else if (context == "{" && type != "comment") state.stack.push("rule");
return style;
},
indent: function(state, textAfter) {
var n = state.stack.length;
if (/^\}/.test(textAfter))
n -= state.stack[state.stack.length-1] == "rule" ? 2 : 1;
return state.baseIndent + n * indentUnit;
},
electricChars: "}"
};
});
CodeMirror.defineMIME("text/x-less", "less");
if (!CodeMirror.mimeModes.hasOwnProperty("text/css"))
CodeMirror.defineMIME("text/css", "less"); | PypiClean |
# /DeepCell-CPU-0.12.9.tar.gz/DeepCell-CPU-0.12.9/deepcell/image_generators/__init__.py
import warnings
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.utils import to_categorical
from deepcell.utils import transform_utils
def _transform_masks(y, transform, data_format=None, **kwargs):
"""Based on the transform key, apply a transform function to the masks.
Refer to :mod:`deepcell.utils.transform_utils` for more information about
available transforms. Caution for unknown transform keys.
Args:
y (numpy.array): Labels of ``ndim`` 4 or 5
transform (str): Name of the transform, one of
``{"deepcell", "disc", "watershed", None}``.
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
kwargs (dict): Optional transform keyword arguments.
Returns:
numpy.array: the output of the given transform function on ``y``.
Raises:
ValueError: Rank of ``y`` is not 4 or 5.
ValueError: Channel dimension of ``y`` is not 1.
ValueError: ``transform`` is invalid value.
"""
valid_transforms = {
'deepcell', # deprecated for "pixelwise"
'pixelwise',
'disc',
'watershed', # deprecated for "outer-distance"
'watershed-cont', # deprecated for "outer-distance"
'inner-distance', 'inner_distance',
'outer-distance', 'outer_distance',
'centroid', # deprecated for "inner-distance"
'fgbg'
}
if data_format is None:
data_format = K.image_data_format()
if y.ndim not in {4, 5}:
raise ValueError('`labels` data must be of ndim 4 or 5. Got', y.ndim)
channel_axis = 1 if data_format == 'channels_first' else -1
if y.shape[channel_axis] != 1:
raise ValueError('Expected channel axis to be 1 dimension. Got',
y.shape[1 if data_format == 'channels_first' else -1])
if isinstance(transform, str):
transform = transform.lower()
if transform not in valid_transforms and transform is not None:
raise ValueError(f'`{transform}` is not a valid transform')
if transform in {'pixelwise', 'deepcell'}:
if transform == 'deepcell':
warnings.warn(f'The `{transform}` transform is deprecated. Please use the '
'`pixelwise` transform instead.',
DeprecationWarning)
dilation_radius = kwargs.pop('dilation_radius', None)
separate_edge_classes = kwargs.pop('separate_edge_classes', False)
edge_class_shape = 4 if separate_edge_classes else 3
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + [edge_class_shape] + list(y.shape[2:]))
else:
shape = tuple(list(y.shape[0:-1]) + [edge_class_shape])
# using uint8 since should only be 4 unique values.
y_transform = np.zeros(shape, dtype=np.uint8)
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = transform_utils.pixelwise_transform(
mask, dilation_radius, data_format=data_format,
separate_edge_classes=separate_edge_classes)
elif transform in {'outer-distance', 'outer_distance',
'watershed', 'watershed-cont'}:
if transform in {'watershed', 'watershed-cont'}:
warnings.warn(f'The `{transform}` transform is deprecated. Please use the '
'`outer-distance` transform instead.',
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.outer_distance_transform_movie
else:
_distance_transform = transform_utils.outer_distance_transform_3d
else:
_distance_transform = transform_utils.outer_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if bins is not None:
# convert to one hot notation
# uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform in {'inner-distance', 'inner_distance', 'centroid'}:
if transform == 'centroid':
warnings.warn(f'The `{transform}` transform is deprecated. Please use the '
'`inner-distance` transform instead.',
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
'alpha': kwargs.pop('alpha', 0.1),
'beta': kwargs.pop('beta', 1)
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.inner_distance_transform_movie
else:
_distance_transform = transform_utils.inner_distance_transform_3d
else:
_distance_transform = transform_utils.inner_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if distance_kwargs['bins'] is not None:
# convert to one hot notation
# uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'disc' or transform is None:
dtype = K.floatx() if transform == 'disc' else np.int32
y_transform = to_categorical(y.squeeze(channel_axis), dtype=dtype)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'fgbg':
y_transform = np.where(y > 1, 1, y)
# convert to one hot notation
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, 1, y.ndim)
# using uint8 since should only be 2 unique values.
y_transform = to_categorical(y_transform, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
return y_transform
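# Usage sketch (illustrative only, shapes are assumptions): convert a batch of
# integer label masks into one-hot foreground/background targets.
#
#     y = np.random.randint(0, 3, size=(8, 64, 64, 1))
#     y_fgbg = _transform_masks(y, 'fgbg', data_format='channels_last')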
# Globally-importable utils.
from deepcell.image_generators.fully_convolutional import ImageFullyConvDataGenerator
from deepcell.image_generators.fully_convolutional import ImageFullyConvIterator
from deepcell.image_generators.fully_convolutional import MovieDataGenerator
from deepcell.image_generators.fully_convolutional import MovieArrayIterator
from deepcell.image_generators.semantic import SemanticDataGenerator
from deepcell.image_generators.semantic import SemanticIterator
from deepcell.image_generators.semantic import SemanticMovieGenerator
from deepcell.image_generators.semantic import SemanticMovieIterator
from deepcell.image_generators.semantic import Semantic3DGenerator
from deepcell.image_generators.semantic import Semantic3DIterator
from deepcell.image_generators.sample import SampleDataGenerator
from deepcell.image_generators.sample import ImageSampleArrayIterator
from deepcell.image_generators.sample import SampleMovieDataGenerator
from deepcell.image_generators.sample import SampleMovieArrayIterator
from deepcell.image_generators.scale import ScaleIterator
from deepcell.image_generators.scale import ScaleDataGenerator
from deepcell.image_generators.tracking import SiameseDataGenerator
from deepcell.image_generators.tracking import SiameseIterator
from deepcell.image_generators.cropping import CroppingDataGenerator
from deepcell.image_generators.cropping import CroppingIterator
__all__ = [
'ImageFullyConvDataGenerator',
'ImageFullyConvIterator',
'MovieDataGenerator',
'MovieArrayIterator',
'SampleDataGenerator',
'ImageSampleArrayIterator',
'SampleMovieDataGenerator',
'SampleMovieArrayIterator',
'ScaleIterator',
'ScaleDataGenerator',
'SiameseDataGenerator',
'SiameseIterator',
'CroppingDataGenerator',
'CroppingIterator',
'SemanticIterator',
'SemanticDataGenerator',
'SemanticMovieIterator',
'SemanticMovieGenerator',
'Semantic3DIterator',
'Semantic3DGenerator',
]
// /Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/jquery-validation/localization/messages_nl.js
(function( factory ) {
if ( typeof define === "function" && define.amd ) {
define( ["jquery", "../jquery.validate"], factory );
} else if (typeof module === "object" && module.exports) {
module.exports = factory( require( "jquery" ) );
} else {
factory( jQuery );
}
}(function( $ ) {
/*
* Translated default messages for the jQuery validation plugin.
* Locale: NL (Dutch; Nederlands, Vlaams)
*/
$.extend( $.validator.messages, {
required: "Dit is een verplicht veld.",
remote: "Controleer dit veld.",
email: "Vul hier een geldig e-mailadres in.",
url: "Vul hier een geldige URL in.",
date: "Vul hier een geldige datum in.",
dateISO: "Vul hier een geldige datum in (ISO-formaat).",
number: "Vul hier een geldig getal in.",
digits: "Vul hier alleen getallen in.",
creditcard: "Vul hier een geldig creditcardnummer in.",
equalTo: "Vul hier dezelfde waarde in.",
extension: "Vul hier een waarde in met een geldige extensie.",
maxlength: $.validator.format( "Vul hier maximaal {0} tekens in." ),
minlength: $.validator.format( "Vul hier minimaal {0} tekens in." ),
rangelength: $.validator.format( "Vul hier een waarde in van minimaal {0} en maximaal {1} tekens." ),
range: $.validator.format( "Vul hier een waarde in van minimaal {0} en maximaal {1}." ),
max: $.validator.format( "Vul hier een waarde in kleiner dan of gelijk aan {0}." ),
min: $.validator.format( "Vul hier een waarde in groter dan of gelijk aan {0}." ),
step: $.validator.format( "Vul hier een veelvoud van {0} in." ),
// For validations in additional-methods.js
iban: "Vul hier een geldig IBAN in.",
dateNL: "Vul hier een geldige datum in.",
phoneNL: "Vul hier een geldig Nederlands telefoonnummer in.",
mobileNL: "Vul hier een geldig Nederlands mobiel telefoonnummer in.",
postalcodeNL: "Vul hier een geldige postcode in.",
bankaccountNL: "Vul hier een geldig bankrekeningnummer in.",
giroaccountNL: "Vul hier een geldig gironummer in.",
bankorgiroaccountNL: "Vul hier een geldig bank- of gironummer in."
} );
return $;
}));
# /ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/esmvalcore/preprocessor/_io.py
import copy
import logging
import os
import shutil
import warnings
from itertools import groupby
from warnings import catch_warnings, filterwarnings
import iris
import iris.aux_factory
import iris.exceptions
import yaml
from cf_units import suppress_errors
from esmvalcore.exceptions import ESMValCoreDeprecationWarning
from esmvalcore.iris_helpers import merge_cube_attributes
from .._task import write_ncl_settings
from ._time import extract_time
logger = logging.getLogger(__name__)
GLOBAL_FILL_VALUE = 1e+20
DATASET_KEYS = {
'mip',
}
VARIABLE_KEYS = {
'reference_dataset',
'alternative_dataset',
}
def _fix_aux_factories(cube):
"""Fix :class:`iris.aux_factory.AuxCoordFactory` after concatenation.
Necessary because of bug in :mod:`iris` (see issue #2478).
"""
coord_names = [coord.name() for coord in cube.coords()]
# Hybrid sigma pressure coordinate
# TODO possibly add support for other hybrid coordinates
if 'atmosphere_hybrid_sigma_pressure_coordinate' in coord_names:
new_aux_factory = iris.aux_factory.HybridPressureFactory(
delta=cube.coord(var_name='ap'),
sigma=cube.coord(var_name='b'),
surface_air_pressure=cube.coord(var_name='ps'),
)
for aux_factory in cube.aux_factories:
if isinstance(aux_factory, iris.aux_factory.HybridPressureFactory):
break
else:
cube.add_aux_factory(new_aux_factory)
# Hybrid sigma height coordinate
if 'atmosphere_hybrid_height_coordinate' in coord_names:
new_aux_factory = iris.aux_factory.HybridHeightFactory(
delta=cube.coord(var_name='lev'),
sigma=cube.coord(var_name='b'),
orography=cube.coord(var_name='orog'),
)
for aux_factory in cube.aux_factories:
if isinstance(aux_factory, iris.aux_factory.HybridHeightFactory):
break
else:
cube.add_aux_factory(new_aux_factory)
# Atmosphere sigma coordinate
if 'atmosphere_sigma_coordinate' in coord_names:
new_aux_factory = iris.aux_factory.AtmosphereSigmaFactory(
pressure_at_top=cube.coord(var_name='ptop'),
sigma=cube.coord(var_name='lev'),
surface_air_pressure=cube.coord(var_name='ps'),
)
for aux_factory in cube.aux_factories:
if isinstance(aux_factory,
iris.aux_factory.AtmosphereSigmaFactory):
break
else:
cube.add_aux_factory(new_aux_factory)
def _get_attr_from_field_coord(ncfield, coord_name, attr):
if coord_name is not None:
attrs = ncfield.cf_group[coord_name].cf_attrs()
attr_val = [value for (key, value) in attrs if key == attr]
if attr_val:
return attr_val[0]
return None
def concatenate_callback(raw_cube, field, _):
"""Use this callback to fix anything Iris tries to break."""
# Remove attributes that cause issues with merging and concatenation
_delete_attributes(
raw_cube,
('creation_date', 'tracking_id', 'history', 'comment')
)
for coord in raw_cube.coords():
# Iris chooses to change longitude and latitude units to degrees
# regardless of value in file, so reinstating file value
if coord.standard_name in ['longitude', 'latitude']:
units = _get_attr_from_field_coord(field, coord.var_name, 'units')
if units is not None:
coord.units = units
# CMOR sometimes adds a history to the coordinates.
_delete_attributes(coord, ('history', ))
def _delete_attributes(iris_object, atts):
for att in atts:
if att in iris_object.attributes:
del iris_object.attributes[att]
def load(file, callback=None, ignore_warnings=None):
"""Load iris cubes from files.
Parameters
----------
file: str
File to be loaded.
callback: callable or None, optional (default: None)
Callback function passed to :func:`iris.load_raw`.
.. deprecated:: 2.8.0
This argument will be removed in 2.10.0.
ignore_warnings: list of dict or None, optional (default: None)
Keyword arguments passed to :func:`warnings.filterwarnings` used to
ignore warnings issued by :func:`iris.load_raw`. Each list element
corresponds to one call to :func:`warnings.filterwarnings`.
Returns
-------
iris.cube.CubeList
Loaded cubes.
Raises
------
ValueError
Cubes are empty.
"""
if not (callback is None or callback == 'default'):
msg = ("The argument `callback` has been deprecated in "
"ESMValCore version 2.8.0 and is scheduled for removal in "
"version 2.10.0.")
warnings.warn(msg, ESMValCoreDeprecationWarning)
if callback == 'default':
callback = concatenate_callback
file = str(file)
logger.debug("Loading:\n%s", file)
if ignore_warnings is None:
ignore_warnings = []
# Avoid duplication of ignored warnings when load() is called more often
# than once
ignore_warnings = list(ignore_warnings)
# Default warnings ignored for every dataset
ignore_warnings.append({
'message': "Missing CF-netCDF measure variable .*",
'category': UserWarning,
'module': 'iris',
})
ignore_warnings.append({
'message': "Ignoring netCDF variable '.*' invalid units '.*'",
'category': UserWarning,
'module': 'iris',
})
# Filter warnings
with catch_warnings():
for warning_kwargs in ignore_warnings:
warning_kwargs.setdefault('action', 'ignore')
filterwarnings(**warning_kwargs)
# Suppress UDUNITS-2 error messages that cannot be ignored with
# warnings.filterwarnings
# (see https://github.com/SciTools/cf-units/issues/240)
with suppress_errors():
raw_cubes = iris.load_raw(file, callback=callback)
logger.debug("Done with loading %s", file)
if not raw_cubes:
raise ValueError(f'Can not load cubes from {file}')
for cube in raw_cubes:
cube.attributes['source_file'] = file
return raw_cubes
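# Usage sketch (the file name and warning filter are placeholders): load all cubes
# from one NetCDF file while silencing an extra known-benign warning on top of the
# defaults added above.
#
#     cubes = load(
#         '/path/to/tas_Amon_EXAMPLE_historical_r1i1p1f1_gr_185001-201412.nc',
#         ignore_warnings=[{'message': "Skipping global attribute.*",
#                           'category': UserWarning,
#                           'module': 'iris'}],
#     )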
def _by_two_concatenation(cubes):
"""Perform a by-2 concatenation to avoid gaps."""
concatenated = iris.cube.CubeList(cubes).concatenate()
if len(concatenated) == 1:
return concatenated[0]
concatenated = _concatenate_overlapping_cubes(concatenated)
if len(concatenated) == 2:
_get_concatenation_error(concatenated)
else:
return concatenated[0]
def _get_concatenation_error(cubes):
"""Raise an error for concatenation."""
# Concatenation not successful -> retrieve exact error message
try:
iris.cube.CubeList(cubes).concatenate_cube()
except iris.exceptions.ConcatenateError as exc:
msg = str(exc)
logger.error('Can not concatenate cubes into a single one: %s', msg)
logger.error('Resulting cubes:')
for cube in cubes:
logger.error(cube)
time = cube.coord("time")
logger.error('From %s to %s', time.cell(0), time.cell(-1))
raise ValueError(f'Can not concatenate cubes: {msg}')
def concatenate(cubes):
"""Concatenate all cubes after fixing metadata."""
if not cubes:
return cubes
if len(cubes) == 1:
return cubes[0]
merge_cube_attributes(cubes)
if len(cubes) > 1:
# order cubes by first time point
try:
cubes = sorted(cubes, key=lambda c: c.coord("time").cell(0).point)
except iris.exceptions.CoordinateNotFoundError as exc:
msg = "One or more cubes {} are missing".format(cubes) + \
" time coordinate: {}".format(str(exc))
raise ValueError(msg)
# iteratively concatenate starting with first cube
result = cubes[0]
for cube in cubes[1:]:
result = _by_two_concatenation([result, cube])
_fix_aux_factories(result)
return result
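# Usage sketch (paths are placeholders): chain load() and concatenate() to get a
# single cube spanning several files of the same variable.
#
#     cubes = []
#     for path in ('/data/tas_185001-190012.nc', '/data/tas_190101-195012.nc'):
#         cubes.extend(load(path))
#     cube = concatenate(cubes)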
def save(cubes,
filename,
optimize_access='',
compress=False,
alias='',
**kwargs):
"""Save iris cubes to file.
Parameters
----------
cubes: iterable of iris.cube.Cube
Data cubes to be saved
filename: str
Name of target file
optimize_access: str
Set internal NetCDF chunking to favour a reading scheme.
Values can be map or timeseries, which improve performance when
reading the file one map or time series at a time.
Users can also provide a coordinate or a list of coordinates. In that
case better performance will be achieved by loading all the values
in that coordinate at a time.
compress: bool, optional
Use NetCDF internal compression.
alias: str, optional
Var name to use when saving instead of the one in the cube.
Returns
-------
str
filename
Raises
------
ValueError
cubes is empty.
"""
if not cubes:
raise ValueError(f"Cannot save empty cubes '{cubes}'")
# Rename some arguments
kwargs['target'] = filename
kwargs['zlib'] = compress
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
if (os.path.exists(filename)
and all(cube.has_lazy_data() for cube in cubes)):
logger.debug(
"Not saving cubes %s to %s to avoid data loss. "
"The cube is probably unchanged.", cubes, filename)
return filename
for cube in cubes:
logger.debug("Saving cube:\n%s\nwith %s data to %s", cube,
"lazy" if cube.has_lazy_data() else "realized", filename)
if optimize_access:
cube = cubes[0]
if optimize_access == 'map':
dims = set(
cube.coord_dims('latitude') + cube.coord_dims('longitude'))
elif optimize_access == 'timeseries':
dims = set(cube.coord_dims('time'))
else:
dims = tuple()
for coord_dims in (cube.coord_dims(dimension)
for dimension in optimize_access.split(' ')):
dims += coord_dims
dims = set(dims)
kwargs['chunksizes'] = tuple(
length if index in dims else 1
for index, length in enumerate(cube.shape))
kwargs['fill_value'] = GLOBAL_FILL_VALUE
if alias:
for cube in cubes:
logger.debug('Changing var_name from %s to %s', cube.var_name,
alias)
cube.var_name = alias
iris.save(cubes, **kwargs)
return filename
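# Usage sketch (output path is a placeholder): write a cube with NetCDF compression
# and chunk sizes that favour reading one time series at a time.
#
#     save([cube], '/work/preproc/tas.nc', optimize_access='timeseries', compress=True)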
def _get_debug_filename(filename, step):
"""Get a filename for debugging the preprocessor."""
dirname = os.path.splitext(filename)[0]
if os.path.exists(dirname) and os.listdir(dirname):
num = int(sorted(os.listdir(dirname)).pop()[:2]) + 1
else:
num = 0
filename = os.path.join(dirname, '{:02}_{}.nc'.format(num, step))
return filename
def cleanup(files, remove=None):
"""Clean up after running the preprocessor.
Warning
-------
.. deprecated:: 2.8.0
This function is no longer used and has been deprecated since
ESMValCore version 2.8.0. It is scheduled for removal in version
2.10.0.
Parameters
----------
files: list of Path
Preprocessor output files (will not be removed if not in `removed`).
remove: list of Path or None, optional (default: None)
Files or directories to remove.
Returns
-------
list of Path
Preprocessor output files.
"""
deprecation_msg = (
"The preprocessor function `cleanup` has been deprecated in "
"ESMValCore version 2.8.0 and is scheduled for removal in version "
"2.10.0."
)
warnings.warn(deprecation_msg, ESMValCoreDeprecationWarning)
if remove is None:
remove = []
for path in remove:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
return files
def write_metadata(products, write_ncl=False):
"""Write product metadata to file."""
output_files = []
for output_dir, prods in groupby(products,
lambda p: os.path.dirname(p.filename)):
sorted_products = sorted(
prods,
key=lambda p: (
p.attributes.get('recipe_dataset_index', 1e6),
p.attributes.get('dataset', ''),
),
)
metadata = {}
for product in sorted_products:
if isinstance(product.attributes.get('exp'), (list, tuple)):
product.attributes = dict(product.attributes)
product.attributes['exp'] = '-'.join(product.attributes['exp'])
if 'original_short_name' in product.attributes:
del product.attributes['original_short_name']
metadata[product.filename] = product.attributes
output_filename = os.path.join(output_dir, 'metadata.yml')
output_files.append(output_filename)
with open(output_filename, 'w') as file:
yaml.safe_dump(metadata, file)
if write_ncl:
output_files.append(_write_ncl_metadata(output_dir, metadata))
return output_files
def _write_ncl_metadata(output_dir, metadata):
"""Write NCL metadata files to output_dir."""
variables = [copy.deepcopy(v) for v in metadata.values()]
info = {'input_file_info': variables}
# Split input_file_info into dataset and variable properties
# dataset keys and keys with non-identical values will be stored
# in dataset_info, the rest in variable_info
variable_info = {}
info['variable_info'] = [variable_info]
info['dataset_info'] = []
for variable in variables:
dataset_info = {}
info['dataset_info'].append(dataset_info)
for key in variable:
dataset_specific = any(variable[key] != var.get(key, object())
for var in variables)
if ((dataset_specific or key in DATASET_KEYS)
and key not in VARIABLE_KEYS):
dataset_info[key] = variable[key]
else:
variable_info[key] = variable[key]
filename = os.path.join(output_dir,
variable_info['short_name'] + '_info.ncl')
write_ncl_settings(info, filename)
return filename
def _concatenate_overlapping_cubes(cubes):
"""Concatenate time-overlapping cubes (two cubes only)."""
# we arrange [cube1, cube2] so that cube1.start <= cube2.start
if cubes[0].coord('time').points[0] <= cubes[1].coord('time').points[0]:
cubes = [cubes[0], cubes[1]]
logger.debug(
"Will attempt to concatenate cubes %s "
"and %s in this order", cubes[0], cubes[1])
else:
cubes = [cubes[1], cubes[0]]
logger.debug(
"Will attempt to concatenate cubes %s "
"and %s in this order", cubes[1], cubes[0])
# get time end points
time_1 = cubes[0].coord('time')
time_2 = cubes[1].coord('time')
if time_1.units != time_2.units:
raise ValueError(
f"Cubes\n{cubes[0]}\nand\n{cubes[1]}\ncan not be concatenated: "
f"time units {time_1.units}, calendar {time_1.units.calendar} "
f"and {time_2.units}, calendar {time_2.units.calendar} differ")
data_start_1 = time_1.cell(0).point
data_start_2 = time_2.cell(0).point
data_end_1 = time_1.cell(-1).point
data_end_2 = time_2.cell(-1).point
# case 1: both cubes start at the same time -> return longer cube
if data_start_1 == data_start_2:
if data_end_1 <= data_end_2:
logger.debug(
"Both cubes start at the same time but cube %s "
"ends before %s", cubes[0], cubes[1])
logger.debug("Cube %s contains all needed data so using it fully",
cubes[1])
cubes = [cubes[1]]
else:
logger.debug(
"Both cubes start at the same time but cube %s "
"ends before %s", cubes[1], cubes[0])
logger.debug("Cube %s contains all needed data so using it fully",
cubes[0])
cubes = [cubes[0]]
# case 2: cube1 starts before cube2
else:
# find time overlap, if any
start_overlap = next((time_1.units.num2date(t)
for t in time_1.points if t in time_2.points),
None)
# case 2.0: no overlap (new iris implementation does allow
# concatenation of cubes with no overlap)
if not start_overlap:
logger.debug(
"Unable to concatenate non-overlapping cubes\n%s\nand\n%s"
"separated in time.", cubes[0], cubes[1])
# case 2.1: cube1 ends after cube2 -> return cube1
elif data_end_1 > data_end_2:
cubes = [cubes[0]]
logger.debug("Using only data from %s", cubes[0])
# case 2.2: cube1 ends before cube2 -> use full cube2 and shorten cube1
else:
logger.debug(
"Extracting time slice between %s and %s from cube %s to use "
"it for concatenation with cube %s", "-".join([
str(data_start_1.year),
str(data_start_1.month),
str(data_start_1.day)
]), "-".join([
str(start_overlap.year),
str(start_overlap.month),
str(start_overlap.day)
]), cubes[0], cubes[1])
c1_delta = extract_time(cubes[0], data_start_1.year,
data_start_1.month, data_start_1.day,
start_overlap.year, start_overlap.month,
start_overlap.day)
# convert c1_delta scalar cube to vector cube, if needed
if c1_delta.data.shape == ():
c1_delta = iris.util.new_axis(c1_delta, scalar_coord="time")
cubes = iris.cube.CubeList([c1_delta, cubes[1]])
logger.debug("Attempting concatenatenation of %s with %s",
c1_delta, cubes[1])
try:
cubes = [iris.cube.CubeList(cubes).concatenate_cube()]
except iris.exceptions.ConcatenateError as ex:
logger.error('Can not concatenate cubes: %s', ex)
logger.error('Cubes:')
for cube in cubes:
logger.error(cube)
raise ex
return cubes
# /Homie4-0.4.0.tar.gz/Homie4-0.4.0/homie/mqtt/mqtt_base.py
#from homie.support.network_information import Network_Information
#network_info = Network_Information()
import logging
logger = logging.getLogger(__name__)
class MQTT_Base(object):
def __init__(self, mqtt_settings, last_will):
logger.debug("MQTT client Settings {}".format(mqtt_settings))
self.last_will = last_will
self.using_shared_mqtt_client = mqtt_settings["MQTT_SHARE_CLIENT"]
self.mqtt_settings = mqtt_settings
self._mqtt_connected = False
self.ip_address = None
self.mac_address = None
self.homie_devices = []
@property
def mqtt_connected(self):
return self._mqtt_connected
@mqtt_connected.setter
def mqtt_connected(self, connected):
if connected != self._mqtt_connected:
logger.debug("MQTT Connected is {} ".format(connected))
self._mqtt_connected = connected
for device in self.homie_devices:
if device.start_time is not None:
device.mqtt_on_connection(connected)
def connect(
self,
): # called by the device when its ready for the mqtt client to start, subclass to provide
logger.debug(
"MQTT Connecting to {} as client {}".format(
self.mqtt_settings["MQTT_BROKER"], self.mqtt_settings["MQTT_CLIENT_ID"]
)
)
def publish(self, topic, payload, retain, qos): # subclass to provide
logger.debug(
"MQTT publish topic: {}, payload: {}, retain {}, qos {}".format(
topic, payload, retain, qos
)
)
def subscribe(self, topic, qos): # subclass to provide
logger.debug("MQTT subscribe topic: {}, qos {}".format(topic, qos))
def unsubscribe(self, topic): # subclass to provide
logger.debug("MQTT unsubscribe topic: {}".format(topic))
def set_will(self, will, topic, retain, qos): # subclass to provide
logger.info("MQTT set will {}, topic {}".format(will, topic))
def get_mac_ip_address(self):
'''
if self.ip_address is None:
self.ip_address = network_info.get_local_ip(
self.mqtt_settings["MQTT_BROKER"], self.mqtt_settings["MQTT_PORT"]
)
if self.mac_address is None:
self.mac_address = network_info.get_local_mac_for_ip(self.ip_address)
return self.mac_address, self.ip_address
'''
return "NoMAC","NoIP"
def _on_message(self, topic, payload, retain, qos):
logger.debug(
"MQTT On Message: Topic {}, Payload {} Reatin {} QOS {}".format(
topic, payload, retain, qos
)
)
for device in self.homie_devices:
if device.start_time is not None: # device is ready
try:
device.mqtt_on_message(topic, payload, retain == 1, qos)
except:
logger.exception("on_message error")
def _on_disconnect(self, rc):
logger.warning("MQTT On Disconnect: Result Code {}".format(rc))
self.mqtt_connected = False
def add_device(self, device):
self.homie_devices.append(device)
def remove_device(self, device): # not tested
del self.homie_devices[device]
def close(self):
logger.info("MQTT Closing") | PypiClean |
# /NEURON_gpu_nightly-8.2a7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/neuron/rxd/dimension3.py
import bisect
import numpy
from .geometry3d.graphicsPrimitives import Cylinder, Cone
from neuron import h
# TODO: remove indirection (e.g. use _h_x3d instead of h.x3d)
def centroids_by_segment(sec):
"""
given a section, returns a dictionary whose entries are lists of cylinders
of radius 0 that should be used for distance calculations, keyed by section
.. warning::
Does not currently support non-frustum based sections (i.e. no support
for new 3d styles, like soma outlines)
.. warning::
This assumes a 3d style exists. The safest way to call this is to call
h.define_shape() first
"""
# TODO: fix the issue described in the warning
# (when this was written, these objects were only under development)
n3d = sec.n3d()
length = sec.L
arc3d = [sec.arc3d(i) for i in range(n3d)]
x3d = numpy.array([sec.x3d(i) for i in range(n3d)])
y3d = numpy.array([sec.y3d(i) for i in range(n3d)])
z3d = numpy.array([sec.z3d(i) for i in range(n3d)])
diam3d = numpy.array([sec.diam3d(i) for i in range(n3d)])
dx = length / sec.nseg
objs = {}
for i in range(sec.nseg):
x_lo = i * dx
x_hi = (i + 1) * dx
pts = [x_lo] + _values_strictly_between(x_lo, x_hi, arc3d) + [x_hi]
local_x3d = numpy.interp(pts, arc3d, x3d)
local_y3d = numpy.interp(pts, arc3d, y3d)
local_z3d = numpy.interp(pts, arc3d, z3d)
local_diam3d = numpy.interp(pts, arc3d, diam3d)
local_objs = []
for j in range(len(pts) - 1):
x0, y0, z0, r0 = (
local_x3d[j],
local_y3d[j],
local_z3d[j],
local_diam3d[j] / 2.0,
)
x1, y1, z1, r1 = (
local_x3d[j + 1],
local_y3d[j + 1],
local_z3d[j + 1],
local_diam3d[j + 1] / 2.0,
)
if x0 != x1 or y0 != y1 or z0 != z1:
local_objs.append(Cylinder(x0, y0, z0, x1, y1, z1, 0))
objs[sec((i + 0.5) / sec.nseg)] = local_objs
return objs
def objects_by_segment(sec):
"""
given a section, returns a dictionary whose entries are lists of objects
    that should be used for distance calculations, keyed by segment
.. warning::
Does not currently support non-frustum based sections (i.e. no support
for new 3d styles, like soma outlines)
.. warning::
This assumes a 3d style exists. The safest way to call this is to call
h.define_shape() first
"""
# TODO: fix the issue described in the warning
# (when this was written, these objects were only under development)
n3d = sec.n3d()
length = sec.L
arc3d = [sec.arc3d(i) for i in range(n3d)]
x3d = numpy.array([sec.x3d(i) for i in range(n3d)])
y3d = numpy.array([sec.y3d(i) for i in range(n3d)])
z3d = numpy.array([sec.z3d(i) for i in range(n3d)])
diam3d = numpy.array([sec.diam3d(i) for i in range(n3d)])
dx = length / sec.nseg
objs = {}
for i in range(sec.nseg):
x_lo = i * dx
x_hi = (i + 1) * dx
pts = [x_lo] + _values_strictly_between(x_lo, x_hi, arc3d) + [x_hi]
local_x3d = numpy.interp(pts, arc3d, x3d)
local_y3d = numpy.interp(pts, arc3d, y3d)
local_z3d = numpy.interp(pts, arc3d, z3d)
local_diam3d = numpy.interp(pts, arc3d, diam3d)
local_objs = []
for j in range(len(pts) - 1):
x0, y0, z0, r0 = (
local_x3d[j],
local_y3d[j],
local_z3d[j],
local_diam3d[j] / 2.0,
)
x1, y1, z1, r1 = (
local_x3d[j + 1],
local_y3d[j + 1],
local_z3d[j + 1],
local_diam3d[j + 1] / 2.0,
)
if r0 == r1:
local_objs.append(Cylinder(x0, y0, z0, x1, y1, z1, r0))
else:
local_objs.append(Cone(x0, y0, z0, r0, x1, y1, z1, r1))
objs[sec((i + 0.5) / sec.nseg)] = local_objs
return objs
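# Usage sketch for the two helpers above (assumes a NEURON section with 3-D
# geometry; per the docstrings, call h.define_shape() first):
#
#   from neuron import h
#   sec = h.Section(name="dend")
#   sec.nseg = 5
#   h.define_shape()
#   ctrs = centroids_by_segment(sec)  # {segment: [zero-radius Cylinder, ...]}
#   geom = objects_by_segment(sec)    # {segment: [Cylinder/Cone, ...]}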
def _values_between(lo, hi, data):
i_lo = bisect.bisect_left(data, lo)
i_hi = bisect.bisect_right(data, hi)
return data[i_lo:i_hi]
def _values_strictly_between(lo, hi, data):
temp = _values_between(lo, hi, data)
if temp and temp[0] == lo:
temp = temp[1:]
if temp and temp[-1] == hi:
temp = temp[:-1]
return temp | PypiClean |
/Emonic-1.0.1.tar.gz/Emonic-1.0.1/emonic/contrib/BaseModal.py | from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.utils import send_from_directory, safe_join as _werkzeug_safe_join
from ..core.branch import Emonic, Response, json, SharedDataMiddleware, Environment, FileSystemLoader, url_encode, Map, base64, os
from ..globals import csrf
from urllib.parse import urljoin, urlencode, quote, quote_plus
app = Emonic(__name__)
def render(template_name, **kwargs):
template = app.template_env.get_template(template_name)
kwargs['url_for'] = url_for
kwargs['csrf_token'] = csrf.generate_csrf_token()
response = Response(template.render(**kwargs), mimetype='text/html')
csrf.set_csrf_token_cookie(response, kwargs['csrf_token'])
return response
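# Usage sketch: render() assumes app.template_env has been configured (see
# template_engine() below) and that the named template exists, e.g.:
#
#   template_engine('templates')
#   response = render('index.html', title='Home')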
# JSON response function
def JsonResponse(data):
json_data = json.dumps(data)
return Response(json_data, mimetype='application/json')
# Redirect function
def redirect(location, code=302) -> Response:
return Response('', status=code, headers={'Location': location})
# URL building function
def url_for(endpoint, **values) -> str:
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
return f'/{app.static_folder}/{filename}'
else:
raise ValueError("Static filename not provided")
elif endpoint == 'redirect':
location = values.get('location', None)
if location:
args = values.get('args', {})
if args:
location = urljoin(location, f'?{urlencode(args)}')
return location
else:
raise ValueError("Redirect location not provided")
elif endpoint == 'user_profile':
username = values.get('username', None)
if username:
return f'/users/{username}'
else:
raise ValueError("Username not provided")
elif endpoint == 'article':
article_id = values.get('article_id', None)
if article_id:
return f'/articles/{article_id}'
else:
raise ValueError("Article ID not provided")
elif endpoint == 'category':
category_name = values.get('category_name', None)
if category_name:
return f'/categories/{category_name}'
else:
raise ValueError("Category name not provided")
elif endpoint == 'search':
query = values.get('query', None)
if query:
return f'/search?q={urlencode(query)}'
else:
raise ValueError("Search query not provided")
else:
raise ValueError("Unknown endpoint")
# Send file with headers function
def send_file(filename, mimetype):
with open(filename, 'rb') as f:
content = f.read()
headers = {'Content-Type': mimetype, 'Content-Disposition': f'attachment; filename={os.path.basename(filename)}'}
return Response(content, headers=headers)
# Middleware for serving static files
def static_engine(static_folder):
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/static': static_folder})
# Set template loader
def template_engine(template_folder):
app.template_env = Environment(loader=FileSystemLoader(template_folder))
# Save JSON content to a file
def SaveJsonContent(data, filename):
with open(filename, 'w') as f:
json.dump(data, f)
# Redirect with query parameters function
def redirect_args(location, **kwargs):
query_params = url_encode(kwargs)
url = f'{location}?{query_params}' if kwargs else location
return Response(status=302, headers={'Location': url})
# Map routes using rules
def url_map(rules):
return Map(rules)
# Stream with context function
def stream_with_context(generator_or_function):
def generate():
for item in generator_or_function():
yield item
return Response(generate())
# Generate a unique key
def make_unique_key():
return base64.urlsafe_b64encode(os.urandom(32)).rstrip(b'=').decode('ascii')
# Encode URLs safely
def url_quote(url, safe='/', encoding=None, errors=None):
    return quote(url, safe=safe, encoding=encoding, errors=errors)
def url_quote_plus(url, safe='/', encoding=None, errors=None):
    return quote_plus(url, safe=safe, encoding=encoding, errors=errors)
# Join directory paths safely
def safe_join(directory, *pathnames):
    return _werkzeug_safe_join(directory, *pathnames)
# Set context processor
def context_processor(f):
app.template_env.globals.update(f())
# Open resource file
def open_resource(resource):
return open(resource, 'rb')
# Define template filters
def template_filter(name=None):
def decorator(f):
app.template_env.filters[name or f.__name__] = f
return f
return decorator
# Set URL defaults for view functions
def url_defaults(f):
app.url_map.url_defaults(f)
# Get attribute from a template
def get_template_attribute(template_name, attribute):
return getattr(app.template_env.get_template(template_name), attribute)
# Abort request with HTTPException
def abort(code):
raise HTTPException(code)
# Make response with appropriate content type
def make_response(response, status=200, headers=None):
if isinstance(response, (str, bytes)):
return Response(response, status=status, headers=headers)
return response | PypiClean |
/MolVS-0.1.1.tar.gz/MolVS-0.1.1/CONTRIBUTING.rst | Contributing
============
.. sectionauthor:: Matt Swain <[email protected]>
Contributions of any kind are greatly appreciated!
Feedback
--------
The `Issue Tracker`_ is the best place to post any feature ideas, requests and bug reports.
The following are especially welcome:
- General feedback on whether any standardization stages should work differently.
- Specific molecules that don't validate or standardize as expected.
- Ideas for new validation and standardization stages.
Contributing
------------
If you are able to contribute changes yourself, just fork the `source code`_ on GitHub, make changes and file a pull
request. All contributions are welcome, no matter how big or small.
The following are especially welcome:
- New validation or standardization stages.
- Alternative tautomer transforms and scores.
- Lists of salts and solvents to strip out.
- New or improved documentation of existing features.
Quick guide to contributing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. `Fork the MolVS repository on GitHub`_, then clone your fork to your local machine::
git clone https://github.com/<username>/MolVS.git
cd molvs
2. Install the development requirements into a `conda environment`_::
conda env create -n molvs -f environment.yml
source activate molvs
3. Create a new branch for your changes::
git checkout -b <name-for-changes>
4. Make your changes or additions. Ideally add some tests and ensure they pass by running::
pytest
5. Commit your changes and push to your fork on GitHub::
git add .
git commit -m "<description-of-changes>"
git push origin <name-for-changes>
6. `Submit a pull request`_.
Tips
~~~~
- Follow the `PEP8`_ style guide.
- Include docstrings as described in `PEP257`_.
- Try and include tests that cover your changes.
- Try to write `good commit messages`_.
- Consider `squashing your commits`_ with rebase.
- Read the GitHub help page on `Using pull requests`_.
.. _`Issue Tracker`: https://github.com/mcs07/MolVS/issues
.. _`source code`: https://github.com/mcs07/MolVS
.. _`Fork the MolVS repository on GitHub`: https://github.com/mcs07/MolVS/fork
.. _`conda environment`: https://conda.io/docs/
.. _`Submit a pull request`: https://github.com/mcs07/MolVS/compare/
.. _`squashing your commits`: http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html
.. _`PEP8`: https://www.python.org/dev/peps/pep-0008
.. _`PEP257`: https://www.python.org/dev/peps/pep-0257
.. _`good commit messages`: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
.. _`Using pull requests`: https://help.github.com/articles/using-pull-requests
| PypiClean |
/DLMS_SPODES-0.40.11.tar.gz/DLMS_SPODES-0.40.11/src/DLMS_SPODES/hdlc/frame.py | from __future__ import annotations
from abc import ABC, abstractmethod
from struct import unpack, pack
from functools import cached_property
from typing import Deque
from enum import IntFlag
import logging
logger = logging.getLogger(__name__)
logger.level = logging.INFO
_FLAG: int = 0x7e
class NotEnoughDataError(Exception):
""" Not enough data received, need more for parse full Frame """
class FormatDataError(Exception):
""" Frame format is not Type 3 HDLC """
class Format:
""" This optional field is present only when using the non-basic frame format. When present, it follows the opening flag sequence. The frame format field is 2 octets in length
and consists of three subfields referred to as the format type subfield, the segmentation subfield and the frame length subfield. The format of the frame format field is as
follows, ISO/IEC 13239:2002(E), 4.9 Frame format field, Type 3 :
Type(4 bits) - Segmentation(1 bit) - Length(11 bit) """
__content: bytes
def __init__(self, content: bytes = None,
is_segmentation: bool = None,
length: int = None):
if content is not None:
if len(content) != 2:
raise ValueError(F'Wrong length Frame format type, must be 2, got {len(content)}')
else:
self.__content = content
if self.type != 0xA:
raise FormatDataError(F'Frame format type not according HDLC Type 3, must be 0xA, got {hex(self.type)}')
else:
            if length.bit_length() <= 11:
value = length
if is_segmentation:
value |= 0b1010_1_00000000000
else:
value |= 0b1010_0_00000000000
self.__content = pack('>H', value)
else:
                raise ValueError(F'Frame length overflow, must be less than 2048, got {length}')
@property
def content(self) -> bytes:
return self.__content
@cached_property
def type(self) -> int:
""" Must be 0b1010 in first 4 bits """
return self.__content[0] >> 4
@cached_property
def length(self) -> int:
""" return length of frame. Mask 11bit. """
return unpack('>H', self.__content)[0] & 0b0000_0_111_11111111
@cached_property
def is_segmentation(self) -> bool:
return bool(self.__content[0] & 0b00001000)
def __str__(self):
return F'Type 3: length-{self.length} {"segmentation" if self.is_segmentation else ""}'
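# Worked example: the format field for a 56-octet (0x38) frame without
# segmentation packs type=0b1010, S=0 and an 11-bit length into two octets:
#
#   Format(is_segmentation=False, length=0x38).content == b'\xa0\x38'
#   Format(content=b'\xa0\x38').length == 0x38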
class Address:
__content: bytes
def __init__(self, content: bytes = None,
upper_address: int = None,
lower_address: int = None):
if content is not None:
if len(content) not in (1, 2, 4):
raise ValueError(F'Wrong length Frame format type, must be 1, 2 or 4 bytes, got {len(content)}')
else:
self.__content = content
else:
if lower_address is None:
if upper_address <= 0x7f:
self.__content = pack('B', upper_address << 1 | 1)
elif upper_address > 0x7f and lower_address is None:
self.__content = pack('BB',
upper_address >> 6 & 0b00111110,
upper_address << 1 & 0b11111110) + b'\x00\x01'
else:
                    raise ValueError(F'Upper address wrong, expected 0..16383, got {upper_address}')
else:
if upper_address <= 0x7f and lower_address <= 0x7f:
self.__content = pack("BB", upper_address << 1, lower_address << 1 | 1)
else:
self.__content = pack("BBBB",
upper_address >> 6 & 0b11111110,
upper_address << 1 & 0b11111110,
lower_address >> 6 & 0b11111110,
lower_address << 1 & 0b11111110 | 1)
@classmethod
def from_frame(cls, value: bytearray) -> Address:
for it in (0, 1, 3):
if value[it] % 2 == 1:
match it:
case 0: new = cls(bytes(value[:1])); break
case 1: new = cls(bytes(value[:2])); break
case 3: new = cls(bytes(value[:4])); break
else:
raise ValueError('HDLC source address wrong, not found end bit')
del value[:len(new)]
return new
@property
def content(self) -> bytes:
return self.__content
def __eq__(self, other: Address):
return self.__content == other.content
@cached_property
def upper(self) -> int:
""" return of upper address with int type """
if len(self.__content) in (1, 2):
return self.__content[0] >> 1
else:
return (self.__content[0] >> 1)*128 + (self.__content[1] >> 1)
@cached_property
def lower(self) -> int | None:
""" return of lower address with int type """
if len(self.__content) == 1:
return None
elif len(self.__content) == 2:
return self.__content[1] >> 1
else:
return (self.__content[2] >> 1)*128 + (self.__content[3] >> 1)
def __str__(self):
return F'{self.upper}{"/"+str(self.lower) if self.lower is not None else ""}'
def __len__(self):
return len(self.__content)
def __hash__(self):
return int.from_bytes(self.__content, "big")
def __repr__(self):
return F"{self.__class__.__name__}(upper_address={self.upper}, lower_address={self.lower})"
_type = ['Information', 'Supervisory', 'Information', 'Unnumbered']
class Control(IntFlag):
""" ISO/IEC 13239:2002(E). P/F = poll bit -- primary station or combined station command frame transmissions/final bit -- secondary station or combined station response
frame transmissions (1 = poll/final) """
# Information transfer command/ response (I format):
# 1 2 3 4 5 6 7 8
# 0 | N(S) | P/F | N(R)
S0_R0 = 0b000_0_000_0
S1_R0 = 0b000_0_001_0
S2_R0 = 0b000_0_010_0
S3_R0 = 0b000_0_011_0
S4_R0 = 0b000_0_100_0
S5_R0 = 0b000_0_101_0
S6_R0 = 0b000_0_110_0
S7_R0 = 0b000_0_111_0
S0_R1 = 0b001_0_000_0
S1_R1 = 0b001_0_001_0
S2_R1 = 0b001_0_010_0
S3_R1 = 0b001_0_011_0
S4_R1 = 0b001_0_100_0
S5_R1 = 0b001_0_101_0
S6_R1 = 0b001_0_110_0
S7_R1 = 0b001_0_111_0
S0_R2 = 0b010_0_000_0
S1_R2 = 0b010_0_001_0
S2_R2 = 0b010_0_010_0
S3_R2 = 0b010_0_011_0
S4_R2 = 0b010_0_100_0
S5_R2 = 0b010_0_101_0
S6_R2 = 0b010_0_110_0
S7_R2 = 0b010_0_111_0
S0_R3 = 0b011_0_000_0
S1_R3 = 0b011_0_001_0
S2_R3 = 0b011_0_010_0
S3_R3 = 0b011_0_011_0
S4_R3 = 0b011_0_100_0
S5_R3 = 0b011_0_101_0
S6_R3 = 0b011_0_110_0
S7_R3 = 0b011_0_111_0
S0_R4 = 0b100_0_000_0
S1_R4 = 0b100_0_001_0
S2_R4 = 0b100_0_010_0
S3_R4 = 0b100_0_011_0
S4_R4 = 0b100_0_100_0
S5_R4 = 0b100_0_101_0
S6_R4 = 0b100_0_110_0
S7_R4 = 0b100_0_111_0
S0_R5 = 0b101_0_000_0
S1_R5 = 0b101_0_001_0
S2_R5 = 0b101_0_010_0
S3_R5 = 0b101_0_011_0
S4_R5 = 0b101_0_100_0
S5_R5 = 0b101_0_101_0
S6_R5 = 0b101_0_110_0
S7_R5 = 0b101_0_111_0
S0_R6 = 0b110_0_000_0
S1_R6 = 0b110_0_001_0
S2_R6 = 0b110_0_010_0
S3_R6 = 0b110_0_011_0
S4_R6 = 0b110_0_100_0
S5_R6 = 0b110_0_101_0
S6_R6 = 0b110_0_110_0
S7_R6 = 0b110_0_111_0
S0_R7 = 0b111_0_000_0
S1_R7 = 0b111_0_001_0
S2_R7 = 0b111_0_010_0
S3_R7 = 0b111_0_011_0
S4_R7 = 0b111_0_100_0
S5_R7 = 0b111_0_101_0
S6_R7 = 0b111_0_110_0
S7_R7 = 0b111_0_111_0
S0_R0_PF = 0b000_1_000_0
S1_R0_PF = 0b000_1_001_0
S2_R0_PF = 0b000_1_010_0
S3_R0_PF = 0b000_1_011_0
S4_R0_PF = 0b000_1_100_0
S5_R0_PF = 0b000_1_101_0
S6_R0_PF = 0b000_1_110_0
S7_R0_PF = 0b000_1_111_0
S0_R1_PF = 0b001_1_000_0
S1_R1_PF = 0b001_1_001_0
S2_R1_PF = 0b001_1_010_0
S3_R1_PF = 0b001_1_011_0
S4_R1_PF = 0b001_1_100_0
S5_R1_PF = 0b001_1_101_0
S6_R1_PF = 0b001_1_110_0
S7_R1_PF = 0b001_1_111_0
S0_R2_PF = 0b010_1_000_0
S1_R2_PF = 0b010_1_001_0
S2_R2_PF = 0b010_1_010_0
S3_R2_PF = 0b010_1_011_0
S4_R2_PF = 0b010_1_100_0
S5_R2_PF = 0b010_1_101_0
S6_R2_PF = 0b010_1_110_0
S7_R2_PF = 0b010_1_111_0
S0_R3_PF = 0b011_1_000_0
S1_R3_PF = 0b011_1_001_0
S2_R3_PF = 0b011_1_010_0
S3_R3_PF = 0b011_1_011_0
S4_R3_PF = 0b011_1_100_0
S5_R3_PF = 0b011_1_101_0
S6_R3_PF = 0b011_1_110_0
S7_R3_PF = 0b011_1_111_0
S0_R4_PF = 0b100_1_000_0
S1_R4_PF = 0b100_1_001_0
S2_R4_PF = 0b100_1_010_0
S3_R4_PF = 0b100_1_011_0
S4_R4_PF = 0b100_1_100_0
S5_R4_PF = 0b100_1_101_0
S6_R4_PF = 0b100_1_110_0
S7_R4_PF = 0b100_1_111_0
S0_R5_PF = 0b101_1_000_0
S1_R5_PF = 0b101_1_001_0
S2_R5_PF = 0b101_1_010_0
S3_R5_PF = 0b101_1_011_0
S4_R5_PF = 0b101_1_100_0
S5_R5_PF = 0b101_1_101_0
S6_R5_PF = 0b101_1_110_0
S7_R5_PF = 0b101_1_111_0
S0_R6_PF = 0b110_1_000_0
S1_R6_PF = 0b110_1_001_0
S2_R6_PF = 0b110_1_010_0
S3_R6_PF = 0b110_1_011_0
S4_R6_PF = 0b110_1_100_0
S5_R6_PF = 0b110_1_101_0
S6_R6_PF = 0b110_1_110_0
S7_R6_PF = 0b110_1_111_0
S0_R7_PF = 0b111_1_000_0
S1_R7_PF = 0b111_1_001_0
S2_R7_PF = 0b111_1_010_0
S3_R7_PF = 0b111_1_011_0
S4_R7_PF = 0b111_1_100_0
S5_R7_PF = 0b111_1_101_0
S6_R7_PF = 0b111_1_110_0
S7_R7_PF = 0b111_1_111_0
# Supervisory commands/ responses (S format): S = supervisory function bit
# 1 2 3 4 5 6 7 8
# 1 0 S S P/F | N(R)
RR_R0 = 0b000_0_00_01
""" Receive ready sequence=0 """
RR_R1 = 0b001_0_00_01
""" Receive ready sequence=1 """
RR_R2 = 0b010_0_00_01
""" Receive ready sequence=2 """
RR_R3 = 0b011_0_00_01
""" Receive ready sequence=3 """
RR_R4 = 0b100_0_00_01
""" Receive ready sequence=4 """
RR_R5 = 0b101_0_00_01
""" Receive ready sequence=5 """
RR_R6 = 0b110_0_00_01
""" Receive ready sequence=6 """
RR_R7 = 0b111_0_00_01
""" Receive ready sequence=7 """
RR_R0_PF = 0b000_1_00_01
""" Receive ready sequence=0 """
RR_R1_PF = 0b001_1_00_01
""" Receive ready sequence=1 """
RR_R2_PF = 0b010_1_00_01
""" Receive ready sequence=2 """
RR_R3_PF = 0b011_1_00_01
""" Receive ready sequence=3 """
RR_R4_PF = 0b100_1_00_01
""" Receive ready sequence=4 """
RR_R5_PF = 0b101_1_00_01
""" Receive ready sequence=5 """
RR_R6_PF = 0b110_1_00_01
""" Receive ready sequence=6 """
RR_R7_PF = 0b111_1_00_01
""" Receive ready sequence=7 """
RNR_R0 = 0b000_0_01_01
RNR_R1 = 0b001_0_01_01
RNR_R2 = 0b010_0_01_01
RNR_R3 = 0b011_0_01_01
RNR_R4 = 0b100_0_01_01
RNR_R5 = 0b101_0_01_01
RNR_R6 = 0b110_0_01_01
RNR_R7 = 0b111_0_01_01
RNR_R0_PF = 0b000_1_01_01
RNR_R1_PF = 0b001_1_01_01
RNR_R2_PF = 0b010_1_01_01
RNR_R3_PF = 0b011_1_01_01
RNR_R4_PF = 0b100_1_01_01
RNR_R5_PF = 0b101_1_01_01
RNR_R6_PF = 0b110_1_01_01
RNR_R7_PF = 0b111_1_01_01
REJ_R0 = 0b000_0_10_01
REJ_R1 = 0b001_0_10_01
REJ_R2 = 0b010_0_10_01
REJ_R3 = 0b011_0_10_01
REJ_R4 = 0b100_0_10_01
REJ_R5 = 0b101_0_10_01
REJ_R6 = 0b110_0_10_01
REJ_R7 = 0b111_0_10_01
REJ_R0_PF = 0b000_1_10_01
REJ_R1_PF = 0b001_1_10_01
REJ_R2_PF = 0b010_1_10_01
REJ_R3_PF = 0b011_1_10_01
REJ_R4_PF = 0b100_1_10_01
REJ_R5_PF = 0b101_1_10_01
REJ_R6_PF = 0b110_1_10_01
REJ_R7_PF = 0b111_1_10_01
SREJ_R0 = 0b000_0_11_01
SREJ_R1 = 0b001_0_11_01
SREJ_R2 = 0b010_0_11_01
SREJ_R3 = 0b011_0_11_01
SREJ_R4 = 0b100_0_11_01
SREJ_R5 = 0b101_0_11_01
SREJ_R6 = 0b110_0_11_01
SREJ_R7 = 0b111_0_11_01
SREJ_R0_PF = 0b000_1_11_01
SREJ_R1_PF = 0b001_1_11_01
SREJ_R2_PF = 0b010_1_11_01
SREJ_R3_PF = 0b011_1_11_01
SREJ_R4_PF = 0b100_1_11_01
SREJ_R5_PF = 0b101_1_11_01
SREJ_R6_PF = 0b110_1_11_01
SREJ_R7_PF = 0b111_1_11_01
# Unnumbered commands/ responses (U format): M = modifier function bit
# 11_MM_P/F_MMM
UI_PF = 0b000_1_00_11
""" Unnumbered Information with Poll """
UI = 0b000_0_00_11
""" Unnumbered Information with wait """
XID_PF = 0b101_1_11_11
""" Exchange identification with Poll. Used to Request/Report capabilities """
XID = 0b101_0_11_11
""" Exchange identification with wait. Used to Request/Report capabilities """
TEST_PF = 0b111_1_00_11
""" TEST with Poll. Exchange identical information fields for testing """
TEST = 0b111_0_00_11
""" TEST with wait. Exchange identical information fields for testing """
UIH_PF = 0b111_1_11_11
""" Unnumbered Information with Header check with Poll """
UIH = 0b111_0_11_11
""" Unnumbered Information with Header check with wait """
# command ISO/IEC 13239:2002(E) 5.5.3.3
# 11_MM_P_MMM
SNRM_P = 0b100_1_00_11
""" Set Normal Response Mode with Poll """
SNRM = 0b100_0_00_11
""" Set Normal Response Mode with wait """
SARM_P = 0b000_1_11_11
""" Set Asynchronous Response Mode with Poll """
SARM = 0b000_0_11_11
""" Set Asynchronous Response with wait """
SABM_P = 0b001_1_11_11
""" Set Asynchronous Balanced Mode with Poll """
SABM = 0b001_0_11_11
""" Set Asynchronous Balanced with wait """
DISC_P = 0b010_1_00_11
""" Disconnect with Poll """
DISC = 0b010_0_00_11
""" Disconnect with wait """
SNRME_P = 0b110_1_11_11
""" Set Normal Response Mode Extended with Poll """
SNRME = 0b110_0_11_11
""" Set Normal Response Mode Extended with wait """
SARME_P = 0b010_1_11_11
""" Set Asynchronous Response Mode Extended with Poll """
SARME = 0b010_0_11_11
""" Set Asynchronous Response Mode Extended with wait """
SABME_P = 0b011_1_11_11
""" Set Asynchronous Balanced Mode Extended with Poll """
SABME = 0b011_0_11_11
""" Set Asynchronous Balanced Mode Extended with wait """
UP_P = 0b001_1_00_11
""" Unnumbered Poll with Poll. Used to solicit control information"""
UP = 0b001_0_00_11
""" Unnumbered Poll with wait. Used to solicit control information"""
SIM_P = 0b000_1_01_11
""" Set Initialization Mode with Poll """
SIM = 0b000_0_01_11
""" Set Initialization Mode with wait """
SM_P = 0b110_1_00_11
""" Set Mode with Poll """
SM = 0b110_0_00_11
""" Set Mode with wait """
RSET_P = 0b100_1_11_11
""" ReSET with Poll. Used for recovery. Resets N(R) but not N(S) """
RSET = 0b100_0_11_11
""" ReSET with wait. Used for recovery. Resets N(R) but not N(S) """
# responses ISO/IEC 13239:2002(E) 5.5.3.4.
# 11_MM_F_MMM
UA_F = 0b011_1_00_11
""" Unnumbered Acknowledgement Final """
UA = 0b011_0_00_11
""" Unnumbered Acknowledgement """
FRMR_F = 0b100_1_01_11
""" FRaMe Reject Final """
FRMR = 0b100_0_01_11
""" FRaMe Reject """
DM_F = 0b000_1_11_11
""" Disconnected Mode Final """
DM = 0b000_0_11_11
""" Disconnected Mode """
RD_F = 0b010_1_00_11
""" Request Disconnect Final. Solicitation for DISC Command """
RD = 0b010_0_00_11
""" Request Disconnect. Solicitation for DISC Command """
RIM_F = 0b000_1_01_11
""" Request initialization mode Final """
RIM = 0b000_0_01_11
""" Request initialization mode """
def __add__(self, other):
return Control(self.value + other)
def __or__(self, other):
return Control(self.value | other)
def __and__(self, other):
return Control(self.value & other)
def __str__(self):
return F'{_type[self.value & 0b11]} {self.name}'
@classmethod
def from_frame(cls, value: bytearray) -> Control:
return Control(value.pop(0))
@property
def content(self) -> bytes:
return pack('B', self.value)
def is_info(self) -> bool:
""" check by information type """
return self.value & 0b1 == 0b0
def is_information(self) -> bool:
""" check by information in frame """
return self.is_info() or self == self.UI or self == self.UI_PF
def is_supervisory(self) -> bool:
""" check by supervisory type """
return self.value & 0b11 == 0b01
def is_unnumbered(self) -> bool:
""" check by unnumbered type """
return self.value & 0b11 == 0b11
def is_receive_ready(self) -> bool:
return self.value & 0b1111 == 0b0001
def is_receive_not_ready(self) -> bool:
return self.value & 0b1111 == 0b0101
def is_reject(self) -> bool:
return self.value & 0b1111 == 0b1001
def is_selective_reject(self) -> bool:
return self.value & 0b1111 == 0b1101
@cached_property
def is_poll(self) -> bool:
""" 5.4.3 Poll/final (P/F) bit """
return True if not self.is_unnumbered() and bool(self.value & 0b000_1_00_00) else False
@classmethod
def next_send_sequence(cls, value: Control) -> Control:
return Control(((value & 0xF0 | (value + 0x2) & 0xE) & 0xFF) & 0xFF)
# value &= 0b1111111_0 # make info from other TODO: is it a gurux bug???
# if value.is_info():
# return Control(value & 0b11110001 | (value + 0x2) & 0b00001110)
# else:
# raise ValueError(F'Increase sender supporting only for information type, got {value}')
@classmethod
def next_receiver_sequence(cls, value: Control) -> Control:
return Control(((value & 0xFF) + 0x20 | 0x10 | value & 0xE) & 0xFF)
# if value.is_info() or value.is_supervisory():
# return Control(value & 0b00011111 | 0x10 | (value + 0x20) & 0b11100000)
# else:
# raise ValueError(F'Increase sender supporting only for information and supervisory type, got {value}')
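# Worked example for the sequence-number helpers above (I-frame control octet,
# written MSB-first as N(R) | P/F | N(S) | 0):
#
#   Control.next_send_sequence(Control.S0_R0_PF)     == Control.S1_R0_PF  # N(S) 0 -> 1
#   Control.next_receiver_sequence(Control.S0_R0_PF) == Control.S0_R1_PF  # N(R) 0 -> 1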
_CCITT = (0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78)
class CRC:
__content: bytes
def __init__(self, content: bytes = None,
message: bytes = None):
if content is not None:
if len(content) != 2:
raise ValueError(F'Wrong CRC length, must be 2, got {len(content)}')
else:
self.__content = content
else:
value = 0xFFFF
for i in message:
value = ((value >> 8) ^ _CCITT[(value ^ i) & 0xFF]) & 0xFFFF
self.__content = pack('H', ~value & 0xFFFF)
@classmethod
def from_frame(cls, value: bytearray, message: bytes = None) -> CRC:
new = cls(content=bytes(value[:2]))
if message is not None and cls(message=message).content == new.content:
del value[:2]
return new
else:
raise ValueError('Wrong CRC')
@property
def content(self) -> bytes:
return self.__content
def __str__(self):
return self.__content.hex(' ')
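# Usage sketch: the HCS/FCS computed above is the 16-bit HDLC frame check
# sequence (reflected CCITT polynomial, complemented).  `header_bytes` and
# `buffer` below are placeholders for an assembled header and a receive buffer:
#
#   hcs = CRC(message=header_bytes)               # compute the check sequence
#   CRC.from_frame(buffer, message=header_bytes)  # verify and consume two octets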
class Info(ABC):
@property
@abstractmethod
def content(self) -> bytes:
""" return content in bytes """
@abstractmethod
def __len__(self):
""" return content length """
@property
@abstractmethod
def info(self) -> bytes:
""" return information in bytes """
class Frame:
""" ISO/IEC 13239:2002(E), 4. In HDLC, all transmissions are in frames. Frames may be either in basic frame format or in non-basic frame format. Neither the basic nor the
non-basic frame format structure includes bits inserted for bit-synchronization (i.e., start or stop elements see 4.3.2) or bits or octets inserted for transparency (see 4.3).
Basic and non-basic frame formats can not be used simultaneously on the same media. See Clause 7.5 for the rules for negotiating from the basic frame format to the non-basic
frame format. However, it is possible for different format types of the non-basic frame to exist simultaneously on the same media. """
__FLAG_content: bytes = pack('B', _FLAG)
__format: Format
__destination_address: Address
__source_address: Address
__control: Control
__hcs: CRC | None
__info: bytes
__fcs: CRC
def __init__(self, content: bytearray = None,
DA: Address = None,
SA: Address = None,
control: Control = None,
info: bytes = None,
is_segmentation: bool = None):
if isinstance(content, bytearray):
if content[0] != _FLAG:
raise ValueError('Wrong start flag')
self.__format = Format(bytes(content[1:3]))
if self.__format.length + 2 > len(content): # 2 is length of flags(7e) in begin and end of frame
raise NotEnoughDataError(F'Frame length not according by it data: got frame with length {len(content)}, but length field is {self.__format.length}')
else:
content.pop(0) # remove start flag
if content[self.__format.length] != _FLAG:
raise ValueError('Wrong length or HDLC end flag')
else:
remaining_frame_data: bytearray = content[2:self.__format.length]
""" for parsing in part """
self.__destination_address = Address.from_frame(remaining_frame_data)
self.__source_address = Address.from_frame(remaining_frame_data)
self.__control = Control.from_frame(remaining_frame_data)
if len(remaining_frame_data) == 2: # info is absence
self.__hcs = None
self.__info = bytes()
else:
self.__hcs = CRC.from_frame(value=remaining_frame_data,
message=self.__header_sequence)
self.__info = bytes(remaining_frame_data[:-2])
self.__fcs = CRC.from_frame(value=remaining_frame_data[-2:],
message=self.__frame_sequence)
del content[:self.__format.length]
else:
self.__destination_address = DA
self.__source_address = SA
self.__control = control
self.__info = info
# Frames that do not have an information field, e.g., as with some supervisory frames, or an information field of zero length do not contain an HCS and an FCS,
# only an FCS. ISO/IEC 13239:2002(E), H.4 Frame format type 3. 7:5 = format + control + HCS? + FCS
if len(self.__info) == 0:
self.__format = Format(is_segmentation=is_segmentation,
length=len(self.__destination_address) + len(self.__source_address) + 5)
self.__hcs = None
else:
self.__format = Format(is_segmentation=is_segmentation,
length=len(self.__destination_address) + len(self.__source_address) + len(self.__info) + 7)
self.__hcs = CRC(message=self.__header_sequence)
self.__fcs = CRC(message=self.__frame_sequence)
def get_header(self) -> tuple[Address, Address]:
""" return SA, DA for reusing """
return self.__destination_address, self.__source_address
@classmethod
def try_from(cls, value: bytearray) -> Frame | None:
""" Search of HDLC start flag and return Frame and value remains for next searching. If wrong frame when return value with out start flag for parsing """
while len(value) != 0 and value[0] != _FLAG: # remove all bytes before flag
value.pop(0)
if len(value) < 9: # where 9 is min length of HDLC frame type-3
return None
else:
try:
return cls(value)
except ValueError as e:
logger.info(F'Wrong Frame: {e.args[0]}')
return None
except NotEnoughDataError as e:
logger.info(F'Frame Error: {e.args[0]}')
return None
except FormatDataError as e:
logger.info(F'Frame Error: {e.args[0]}')
value.pop(0)
return None
@staticmethod
def flag() -> int:
""" return flag frame """
return _FLAG
@property
def __header_sequence(self) -> bytes:
return self.__format.content + self.__destination_address.content + self.__source_address.content + self.__control.content
@property
def __frame_sequence(self) -> bytes:
if self.__hcs is None:
return self.__header_sequence
else:
return self.__header_sequence + self.__hcs.content + self.__info
@cached_property
def content(self) -> bytes:
return Frame.__FLAG_content + self.__frame_sequence + self.__fcs.content + Frame.__FLAG_content
def __str__(self):
return F'{self.__control.name} DA:{self.__destination_address} SA:{self.__source_address} {" Info["+str(len(self.__info))+"]:"+self.__info.hex(" ") if len(self.__info) != 0 else ""}'
def __len__(self):
return self.__format.length
def is_for_me(self, DA: Address, SA: Address) -> bool:
""" compare by DA and SA received frame"""
return DA == self.__source_address and SA == self.__destination_address
@property
def control(self):
return self.__control
@cached_property
def is_segmentation(self) -> bool:
return self.__format.is_segmentation
@property
def info(self) -> bytes:
return self.__info
def is_next(self, other: Frame) -> bool:
""" return TRUE if frame is next information frame of current. Other must be previous. """
return self.__control == Control.next_send_sequence(Control.next_receiver_sequence(other.control))
def is_next_send(self, other: Frame) -> bool:
""" return TRUE if frame is next information frame of current. Other must be previous. """
return self.__control == Control.next_send_sequence(other.control)
@staticmethod
def join_info(frames: Deque[Frame]) -> bytearray:
""" TODO: """
while len(frames) != 0:
frame: Frame = frames.popleft()
if frame.control.is_info():
info: bytearray = bytearray(frame.info)
break
else:
logger.warning(F'Frame {frame} not handled and deleted')
else:
raise ValueError('Not found information Frame')
while frame.is_segmentation:
if len(frames) == 0:
raise ValueError('Not found end information Frame')
else:
next_frame: Frame = frames.popleft()
if next_frame.control.is_info() and next_frame.is_next_send(frame):
info.extend(next_frame.info)
frame = next_frame
else:
logger.warning(F'Frame {frame} not handled and deleted')
return info
if __name__ == '__main__':
ad1 = Address(upper_address=0x3f,
lower_address=1)
ad2 = Address(upper_address=0x3f,
lower_address=1)
print(ad1)
comp = ad1 == ad2
comp2 = ad1 is ad2
    a = Frame(DA=Address(upper_address=0x3f, lower_address=1),
              SA=Address(upper_address=1),
              control=Control(0x10),
              info=bytes(),
              is_segmentation=False)
    head = a.get_header()
    a1 = Frame(DA=Address(upper_address=0x3),
               SA=Address(upper_address=10),
               control=Control(0x10),
               info=bytes(),
               is_segmentation=False)
    comp3 = a1.is_for_me(*head)
print(a)
# a1 = Frame(upper_destination_address=0x3f,
# upper_source_address=1,
# control=Control(0x32),
# info=bytes(),
# is_segmentation=False)
# print(a1)
# print(a1.is_next(a))
# data = bytearray.fromhex('7e a0 38 21 02 21 30 84 d4 e6 e7 00 61 29 a1 09 06 07 60 85 74 05 08 01 01 a2 03 02 01 00 a3 05 a1 03 02 01 00 be 10 04 0e 08 00 06 5f 1f 04 00 00 18 18 04 00 00 07 4e 98 7e')
# data = bytearray(b'~~\xa0\x1f!\x02!sV\xf4\x81\x80\x12\x05\x01\x80\x06\x01\x80\x07\x04\x00\x00\x00\x01\x08\x04\x00\x00\x00\x01S;~\xa0\x1f!\x02!sV\xf4\x81\x80\x12\x05\x01\x80\x06\x01\x80\x07\x04\x00\x00\x00\x01\x08\x04\x00\x00\x00\x01S;~')
# data = bytearray.fromhex('7e a8 87 21 02 21 7a fa 2c 07 e4 01 01 03 02 1e ff ff 80 00 00 15 00 00 00 00 db 69 14 81 15 00 00 00 00 00 49 8b f0 15 00 00 00 00 08 99 89 25 15 00 00 00 00 07 a1 9a 16 15 00 00 00 00 00 b2 3e cb 15 00 00 00 00 00 00 00 00 15 00 00 00 00 00 03 7e')
data = bytearray.fromhex('7E A8 01 41 02 21 52 99 A9 E6 E7 00 C4 02 C1 00 00 00 00 01 00 82 02 EA 01 81 9F 02 04 12 00 0F 11 01 09 06 00 00 28 00 00 FF 02 02 01 09 02 03 0F 01 16 01 00 02 03 0F 02 16 01 00 02 03 0F 03 16 01 00 02 03 0F 04 16 01 00 02 03 0F 05 16 01 00 02 03 0F 06 16 01 00 02 03 0F 07 16 01 00 02 03 0F 08 16 01 00 02 03 0F 09 16 01 00 01 04 02 02 0F 01 16 00 02 02 0F 02 16 00 02 02 0F 03 16 00 02 02 0F 04 16 00 02 04 12 00 08 11 00 09 06 00 00 01 00 00 FF 02 02 01 09 02 03 0F 01 16 01 00 02 03 0F 02 16 01 00 02 03 0F 03 16 01 00 02 03 0F 04 16 01 00 02 03 0F 05 16 01 00 02 03 0F 06 16 01 00 02 03 0F 07 16 01 00 02 03 0F 08 16 01 00 02 03 0F 09 16 01 00 01 06 02 02 0F 01 16 01 02 02 0F 02 16 01 02 02 0F 03 16 01 02 02 0F 04 16 01 02 02 0F 05 16 01 02 02 0F 06 16 01 02 1F FC 7E')
# data = bytearray(b'~\xa8\x87!\x02!\x96\x98\x01\xe6\xe7\x00\xc4\x01\xc1\x00\x01\n\x02\t\t\x0c\x07\xe5\x08\x06\x05\x0b\x1e\xff\xff\x80\x00\x00\x15\x00\x00\x00\x00\xda\x85\x9e~')
# data = bytearray(b'~\xa8\x89!\x03\x96\xae)\xe6\xe7\x00\xc4\x01\xc1\x00\x01\x07\x02\x02\x11\x00\x01\x05\x02\x03\t\x04\x00\x00\x00\xff\t\x06\x00\x00\n\x00d\xff\x12\x00\x01\x02\x03\t\x04\x01\x00\x00\xff\t\x06\x00\x00\n\x00d\xff\x12\x00\x02\x02\x03\t\x04\x0c\x17\x00\xff\t\x06\x00\x00\n\x00d\xff\x12\x00\x04\x02\x03\t\x04\x16\x1e\x00\xff\t\x06\x00\x00\n\x00d\xff\x12\x00\x04\x02\x03\t\x04\x17\x1e\x00\xff\t\x06\x00\x00\n\x00d\xff\x12\x00\x03\x02\x02\x11\x01\x01\x01\x02\x03\t\x04\x01\x00\x00\xff\t\x06\x00\x00\x19Q~')
frame1 = Frame.try_from(data)
print(frame1)
a = Control.SNRM_P
print(a) | PypiClean |
/NehorayRapid-0.0.1-py3-none-any.whl/mmedit/models/common/contextual_attention.py | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContextualAttentionModule(nn.Module):
"""Contexture attention module.
The details of this module can be found in:
Generative Image Inpainting with Contextual Attention
Args:
unfold_raw_kernel_size (int): Kernel size used in unfolding raw
feature. Default: 4.
unfold_raw_stride (int): Stride used in unfolding raw feature. Default:
2.
unfold_raw_padding (int): Padding used in unfolding raw feature.
Default: 1.
unfold_corr_kernel_size (int): Kernel size used in unfolding
context for computing correlation maps. Default: 3.
unfold_corr_stride (int): Stride used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_dilation (int): Dilation used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_padding (int): Padding used in unfolding context for
computing correlation maps. Default: 1.
scale (float): The resale factor used in resize input features.
Default: 0.5.
fuse_kernel_size (int): The kernel size used in fusion module.
Default: 3.
softmax_scale (float): The scale factor for softmax function.
Default: 10.
return_attention_score (bool): If True, the attention score will be
returned. Default: True.
"""
def __init__(self,
unfold_raw_kernel_size=4,
unfold_raw_stride=2,
unfold_raw_padding=1,
unfold_corr_kernel_size=3,
unfold_corr_stride=1,
unfold_corr_dilation=1,
unfold_corr_padding=1,
scale=0.5,
fuse_kernel_size=3,
softmax_scale=10,
return_attention_score=True):
super().__init__()
self.unfold_raw_kernel_size = unfold_raw_kernel_size
self.unfold_raw_stride = unfold_raw_stride
self.unfold_raw_padding = unfold_raw_padding
self.unfold_corr_kernel_size = unfold_corr_kernel_size
self.unfold_corr_stride = unfold_corr_stride
self.unfold_corr_dilation = unfold_corr_dilation
self.unfold_corr_padding = unfold_corr_padding
self.scale = scale
self.fuse_kernel_size = fuse_kernel_size
self.with_fuse_correlation = fuse_kernel_size > 1
self.softmax_scale = softmax_scale
self.return_attention_score = return_attention_score
if self.with_fuse_correlation:
assert fuse_kernel_size % 2 == 1
fuse_kernel = torch.eye(fuse_kernel_size).view(
1, 1, fuse_kernel_size, fuse_kernel_size)
self.register_buffer('fuse_kernel', fuse_kernel)
padding = int((fuse_kernel_size - 1) // 2)
self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, context, mask=None):
"""Forward Function.
Args:
x (torch.Tensor): Tensor with shape (n, c, h, w).
context (torch.Tensor): Tensor with shape (n, c, h, w).
mask (torch.Tensor): Tensor with shape (n, 1, h, w). Default: None.
Returns:
tuple(torch.Tensor): Features after contextural attention.
"""
# raw features to be used in copy (deconv)
raw_context = context
raw_context_cols = self.im2col(
raw_context,
kernel_size=self.unfold_raw_kernel_size,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
normalize=False,
return_cols=True)
# resize the feature to reduce computational cost
x = F.interpolate(x, scale_factor=self.scale)
context = F.interpolate(context, scale_factor=self.scale)
context_cols = self.im2col(
context,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
normalize=True,
return_cols=True)
h_unfold, w_unfold = self.calculate_unfold_hw(
context.size()[-2:],
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
)
# reshape context_cols to
# (n*h_unfold*w_unfold, c, unfold_mks, unfold_mks)
# 'mks' is short for 'mask_kernel_size'
context_cols = context_cols.reshape(-1, *context_cols.shape[2:])
# the shape of correlation map should be:
# (n, h_unfold*w_unfold, h', w')
correlation_map = self.patch_correlation(x, context_cols)
# fuse correlation map to enlarge consistent attention region.
if self.with_fuse_correlation:
correlation_map = self.fuse_correlation_map(
correlation_map, h_unfold, w_unfold)
correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
attention_score = self.softmax(correlation_map * self.softmax_scale)
raw_context_filter = raw_context_cols.reshape(
-1, *raw_context_cols.shape[2:])
output = self.patch_copy_deconv(attention_score, raw_context_filter)
# deconv will cause overlap and we need to remove the effects of that
overlap_factor = self.calculate_overlap_factor(attention_score)
output /= overlap_factor
if self.return_attention_score:
n, _, h_s, w_s = attention_score.size()
attention_score = attention_score.view(n, h_unfold, w_unfold, h_s,
w_s)
return output, attention_score
return output
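    # Usage sketch (shapes follow the docstring above; sizes are illustrative):
    #
    #   module = ContextualAttentionModule()
    #   x = torch.rand(1, 64, 32, 32)        # features to be completed
    #   context = torch.rand(1, 64, 32, 32)  # known surrounding features
    #   mask = torch.zeros(1, 1, 32, 32)     # 1 marks holes, 0 marks valid pixels
    #   out, attention_score = module(x, context, mask)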
def patch_correlation(self, x, kernel):
"""Calculate patch correlation.
Args:
x (torch.Tensor): Input tensor.
kernel (torch.Tensor): Kernel tensor.
Returns:
torch.Tensor: Tensor with shape of (n, l, h, w).
"""
n, _, h_in, w_in = x.size()
patch_corr = F.conv2d(
x.view(1, -1, h_in, w_in),
kernel,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
groups=n)
h_out, w_out = patch_corr.size()[-2:]
return patch_corr.view(n, -1, h_out, w_out)
def patch_copy_deconv(self, attention_score, context_filter):
"""Copy patches using deconv.
Args:
attention_score (torch.Tensor): Tensor with shape of (n, l , h, w).
context_filter (torch.Tensor): Filter kernel.
Returns:
torch.Tensor: Tensor with shape of (n, c, h, w).
"""
n, _, h, w = attention_score.size()
attention_score = attention_score.view(1, -1, h, w)
output = F.conv_transpose2d(
attention_score,
context_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
groups=n)
h_out, w_out = output.size()[-2:]
return output.view(n, -1, h_out, w_out)
def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
"""Fuse correlation map.
This operation is to fuse correlation map for increasing large
consistent correlation regions.
The mechanism behind this op is simple and easy to understand. A
standard 'Eye' matrix will be applied as a filter on the correlation
map in horizontal and vertical direction.
The shape of input correlation map is (n, h_unfold*w_unfold, h, w).
When adopting fusing, we will apply convolutional filter in the
reshaped feature map with shape of (n, 1, h_unfold*w_fold, h*w).
A simple specification for horizontal direction is shown below:
.. code-block:: python
(h, (h, (h, (h,
0) 1) 2) 3) ...
(h, 0)
(h, 1) 1
(h, 2) 1
(h, 3) 1
...
"""
# horizontal direction
n, _, h_map, w_map = correlation_map.size()
map_ = correlation_map.permute(0, 2, 3, 1)
map_ = map_.reshape(n, h_map * w_map, h_unfold * w_unfold, 1)
map_ = map_.permute(0, 3, 1, 2).contiguous()
map_ = self.fuse_conv(map_, self.fuse_kernel)
correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
# vertical direction
map_ = correlation_map.permute(0, 2, 1, 4,
3).reshape(n, 1, h_unfold * w_unfold,
h_map * w_map)
map_ = self.fuse_conv(map_, self.fuse_kernel)
# Note that the dimension should be transposed since the convolution of
# eye matrix will put the normed scores into the last several dimension
correlation_map = map_.view(n, w_unfold, h_unfold, w_map,
h_map).permute(0, 4, 3, 2, 1)
correlation_map = correlation_map.reshape(n, -1, h_unfold, w_unfold)
return correlation_map
def calculate_unfold_hw(self,
input_size,
kernel_size=3,
stride=1,
dilation=1,
padding=0):
"""Calculate (h, w) after unfolding
The official implementation of `unfold` in pytorch will put the
dimension (h, w) into `L`. Thus, this function is just to calculate the
(h, w) according to the equation in:
https://pytorch.org/docs/stable/nn.html#torch.nn.Unfold
"""
h_in, w_in = input_size
h_unfold = int((h_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
w_unfold = int((w_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
return h_unfold, w_unfold
def calculate_overlap_factor(self, attention_score):
"""Calculate the overlap factor after applying deconv.
Args:
attention_score (torch.Tensor): The attention score with shape of
(n, c, h, w).
Returns:
torch.Tensor: The overlap factor will be returned.
"""
h, w = attention_score.shape[-2:]
kernel_size = self.unfold_raw_kernel_size
ones_input = torch.ones(1, 1, h, w).to(attention_score)
ones_filter = torch.ones(1, 1, kernel_size,
kernel_size).to(attention_score)
overlap = F.conv_transpose2d(
ones_input,
ones_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding)
# avoid division by zero
overlap[overlap == 0] = 1.
return overlap
def mask_correlation_map(self, correlation_map, mask):
"""Add mask weight for correlation map.
Add a negative infinity number to the masked regions so that softmax
function will result in 'zero' in those regions.
Args:
correlation_map (torch.Tensor): Correlation map with shape of
(n, h_unfold*w_unfold, h_map, w_map).
mask (torch.Tensor): Mask tensor with shape of (n, c, h, w). '1'
in the mask indicates masked region while '0' indicates valid
region.
Returns:
torch.Tensor: Updated correlation map with mask.
"""
if mask is not None:
mask = F.interpolate(mask, scale_factor=self.scale)
# if any pixel is masked in patch, the patch is considered to be
# masked
mask_cols = self.im2col(
mask,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation)
mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
mask_cols = mask_cols.permute(0, 2,
1).reshape(mask.size(0), -1, 1, 1)
# add negative inf will bring zero in softmax
mask_cols[mask_cols == 1] = -float('inf')
correlation_map += mask_cols
return correlation_map
def im2col(self,
img,
kernel_size,
stride=1,
padding=0,
dilation=1,
normalize=False,
return_cols=False):
"""Reshape image-style feature to columns.
This function is used for unfold feature maps to columns. The
details of this function can be found in:
https://pytorch.org/docs/1.1.0/nn.html?highlight=unfold#torch.nn.Unfold
Args:
img (torch.Tensor): Features to be unfolded. The shape of this
feature should be (n, c, h, w).
kernel_size (int): In this function, we only support square kernel
with same height and width.
stride (int): Stride number in unfolding. Default: 1.
padding (int): Padding number in unfolding. Default: 0.
dilation (int): Dilation number in unfolding. Default: 1.
normalize (bool): If True, the unfolded feature will be normalized.
Default: False.
return_cols (bool): The official implementation in PyTorch of
unfolding will return features with shape of
(n, c*$prod{kernel_size}$, L). If True, the features will be
reshaped to (n, L, c, kernel_size, kernel_size). Otherwise,
the results will maintain the shape as the official
implementation.
Returns:
torch.Tensor: Unfolded columns. If `return_cols` is True, the \
shape of output tensor is \
`(n, L, c, kernel_size, kernel_size)`. Otherwise, the shape \
will be `(n, c*$prod{kernel_size}$, L)`.
"""
# unfold img to columns with shape (n, c*kernel_size**2, num_cols)
img_unfold = F.unfold(
img,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
# normalize the feature map
if normalize:
norm = torch.sqrt((img_unfold**2).sum(dim=1, keepdim=True))
eps = torch.tensor([1e-4]).to(img)
img_unfold = img_unfold / torch.max(norm, eps)
if return_cols:
img_unfold_ = img_unfold.permute(0, 2, 1)
n, num_cols = img_unfold_.size()[:2]
img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size,
kernel_size)
return img_cols
return img_unfold | PypiClean |
/FAST-2.5.tar.gz/FAST-2.5/server/dbload/README | Stefan Chakerian
Notes on tables.sql
tables.sql has the table structures.
The fields are fairly self-explanatory. If the field names
of different tables are the same, chances are good that one
table (e.g. scenario_id in the study table) is designed to
reference the other table with joins.
Run the mysql client against the database to explore these tables and work out
the SELECT queries you need, e.g.
$ mysql -u acrodbadmin -p acro
select * from package;
select * from artifact where status is not null order by status;
select * from tstatus where analysis_id=20;
select * from tstatus where analysis_id=709 order by comb+0;
Exact uses special nodes for configs and builds, which include
things like the output file and status of the run. The problem
is that if you want to store any other information, you have to
mess with the Exact data structures and add a new node class.
Instead of duplicating this, which is limited to just config and build
files, I created a table called "artifact". If Exact is rewritten to
allow arbitrary artifacts from tests (which needs to be done), it will
be easy to convert it to this database.
The artifact table is for storing "extra" information. In this case,
it's the build and config output. You can describe this with the ENUM
in the artifact table:
type ENUM("Error", "Config","Build","File") NOT NULL,
If it's config information, type should be set to "config", etc.
If you want to add more types, always add to the END of the ENUM,
never the middle... I don't know what'll happen to the database if
you try to ALTER TABLE and add anything to the middle of an enum.
"File" is for arbitrary files you want associated with this artifact.
Currently, all the logs and xml files are associated with an artifact,
which is associated with a scenario_id. So, you can find out all the
files of a particular scenario by joining scenario_id with artifact
to get the artifact_id, and joining that to file.
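For example, to list the files of scenario 42 (a sketch; check tables.sql for
the exact column names):
  select file.* from artifact, file
  where artifact.scenario_id=42 and file.artifact_id=artifact.artifact_id;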
There are also arbitrary notes you can associate with the artifact.
There can be multiple notes, so you'd select on artnotes
based on the artifact_id. (Currently, artnote is used to store
flags and warning messages stored by the config and build artifacts,
but it can associate any text to an artifact. It, too, has a type
field which can be updated.) | PypiClean |
/MetaCerberus-1.1.tar.gz/MetaCerberus-1.1/lib/metacerberus_parser.py | def warn(*args, **kwargs):
    pass
# Silence third-party warnings by replacing warnings.warn with a no-op
import warnings
warnings.warn = warn
import os
from pathlib import Path
import pandas as pd
def parseHmmer(hmm_tsv, config, subdir):
path = Path(config['DIR_OUT'], subdir)
path.mkdir(exist_ok=True, parents=True)
done = path / "complete"
if not config['REPLACE'] and done.exists():
rollup_files = dict()
for name in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
outfile = Path(path, f"HMMER_BH_{name}_rollup.tsv")
if outfile.exists():
rollup_files[name] = outfile
return rollup_files
done.unlink(missing_ok=True)
minscore = config["MINSCORE"]
top5File = Path(path, "HMMER_top_5.tsv")
# Calculate Best Hit
BH_query = {}
BH_top5 = {}
#"target", "query", "e-value", "score", "length", "start", "end"
with open(hmm_tsv, "r") as reader:
for line in reader:
line = line.split('\t')
try:
target = line[0]
query = line[1]
e_value = line[2]
line[3] = float(line[3])
score = line[3]
length = int(line[4])
start = int(line[5])
end = int(line[6])
except:
continue
if score < minscore: # Skip scores less than minscore
print("DEBUG: MINSCORE DETECTED")
continue
# store top 5 per query
if query not in BH_top5:
BH_top5[query] = [line]
elif len(BH_top5[query]) < 5:
BH_top5[query].append(line)
else:
BH_top5[query].sort(key = lambda x: x[3], reverse=False)
if score > float(BH_top5[query][0][3]):
BH_top5[query][0] = line
# Check for Best Score per query
if query not in BH_query:
BH_query[query] = line
elif score > float(BH_query[query][3]):
BH_query[query] = line
# Save Top 5 hits tsv rollup
with top5File.open('w') as writer:
print("Target Name", "ID", "EC value", "E-Value (sequence)", "Score (domain)", file=writer, sep='\t')
for query in sorted(BH_top5.keys()):
BH_top5[query].sort(key = lambda x: x[3], reverse=True)
for line in BH_top5[query]:
id = [i for i in line[1].split(',')]
ec = []
print(line[0], ','.join(id), ','.join(ec), line[2], line[3], file=writer, sep='\t')
# Create dictionary with found IDs and counts
ID_counts = {}
for line in BH_query.values():
IDs = [ID for ID in line[1].split(",")]
for ID in IDs:
if ID not in ID_counts:
ID_counts[ID] = 0
ID_counts[ID] += 1
# Write rollup files to disk
dbPath = Path(config['PATHDB'])
dfRollups = rollupAll(ID_counts, dbPath, path)
rollup_files = dict()
for name,df in dfRollups.items():
if len(df.index) > 1:
outfile = Path(path, f"HMMER_BH_{name}_rollup.tsv")
df.to_csv(outfile, index=False, header=True, sep='\t')
rollup_files[name] = outfile
done.touch()
return rollup_files
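# Usage sketch (the config keys shown are the ones read above; paths are
# illustrative):
#
#   config = {"DIR_OUT": "results", "REPLACE": False, "MINSCORE": 25, "PATHDB": "DB"}
#   rollups = parseHmmer("sample.hmm.tsv", config, "parse/sample")
#   counts = createCountTables(rollups, config, "parse/sample")  # defined below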
######### Roll-Up All #########
def rollupAll(COUNTS: dict, lookupPath: str, outpath: str):
dfLookup = dict()
dfRollup = dict()
count_file = dict()
for name in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
count_file[name] = Path(outpath, f'counts_{name}.tsv').open('w')
print('ID', 'count', sep='\t', file=count_file[name])
dbPath = Path(lookupPath, f"{name}-onto_rel1.tsv")
dfLookup[name] = pd.read_csv(dbPath, sep='\t').fillna('')
dfRollup[name] = pd.DataFrame()
errfile = os.path.join(outpath, 'lookup.err')
with open(errfile, 'w') as errlog:
for ID,count in sorted(COUNTS.items()):
found = False
for name in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
rows = pd.DataFrame(dfLookup[name][dfLookup[name].ID==ID])
if not rows.empty:
found = True
rows.drop(rows[rows['Function']==''].index, inplace=True)
if rows.empty:
print("WARNING:'", ID, "'Does not have a 'Function' in the Lookup File:", name, file=errlog)
continue
print(ID, count, sep='\t', file=count_file[name])
rows['Count'] = count
dfRollup[name] = pd.concat([dfRollup[name],rows])
if not found:
print("WARNING:'", ID, "'not found in any Lookup File", file=errlog)
continue
return dfRollup
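# Note: each <name>-onto_rel1.tsv lookup file read above appears to be a
# tab-separated table with an ID column, a Function column and hierarchy
# columns named L1, L2, ... (inferred from the column accesses in this module),
# e.g.:
#
#   ID       Function                L1           L2
#   K00001   alcohol dehydrogenase   Metabolism   Carbohydrate metabolism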
########## Counts Table #########
def createCountTables(rollup_files:dict, config:dict, subdir: str):
done = Path(config['DIR_OUT']) / subdir / "complete"
dfCounts = dict()
for dbName,filepath in rollup_files.items():
outpath = Path(config['DIR_OUT'], subdir, f"{dbName}-rollup_counts.tsv")
if not config['REPLACE'] and done.exists() and outpath.exists():
dfCounts[dbName] = outpath
continue
done.unlink(missing_ok=True)
try:
df = pd.read_csv(filepath, sep='\t')
except:
continue
dictCount = {}
for i,row in df.iterrows():
for colName,colData in row.items():
if not colName.startswith('L'):
continue
level = colName[1]
name = colData
if name:
name = f"lvl{level}: {name}"
if name not in dictCount:
dictCount[name] = [level, 0, ""]
dictCount[name][1] += row['Count']
name = row.Function
if not name:
continue
name = f"{row.ID}: {name}"
if name not in dictCount:
dictCount[name] = ['Function', 0, row.ID]
dictCount[name][1] += row['Count']
data = {
'Id':[x[2] for x in dictCount.values()],
'Name':list(dictCount.keys()),
'Level':[x[0] for x in dictCount.values()],
'Count':[x[1] for x in dictCount.values()]}
df = pd.DataFrame(data=data)
df.fillna(0, inplace=True)
df.to_csv(outpath, index=False, header=True, sep='\t')
dfCounts[dbName] = outpath
done.touch()
return dfCounts
# Merge TSV Files
def merge_tsv(tsv_list:dict, out_file:Path):
names = sorted(list(tsv_list.keys()))
file_list = dict()
lines = dict()
IDS = set()
for name in names:
file_list[name] = open(tsv_list[name])
file_list[name].readline() # skip header
lines[name] = file_list[name].readline().split()
if lines[name]:
IDS.add(lines[name][0])
if len(IDS) == 0: # Fail if nothing to merge
return False
with open(out_file, 'w') as writer:
print("ID", '\t'.join(names), sep='\t', file=writer)
while IDS:
ID = sorted(IDS)[0]
IDS.remove(ID)
line = [ID]
for name in names:
if not lines[name]: # End of file
line.append('0')
elif lines[name][0] > ID: # File ID comes after current
line.append('0')
else:
line.append(lines[name][1])
lines[name] = file_list[name].readline().split()
if lines[name]:
IDS.add(lines[name][0])
print('\t'.join(line), file=writer)
for name in names:
file_list[name].close()
return True | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/kotaclib.py | from __future__ import absolute_import
import re
class KoTACSearch(object):
"""Base class for all KoTAC*Search classes.
All bases classes must override:
- all _reg_*_ and _com_interfaces_ attributes
- startSearch() method
Utility methods:
- parseSearchParam()
"""
#_com_interfaces_ = [components.interfaces.nsIAutoCompleteSearch]
#_reg_clsid_ = "{<guid>}"
#_reg_contractid_ = "@mozilla.org/autocomplete/search;1?name=<name>"
#_reg_desc_ = "<desc>"
def startSearch(self, searchString, searchParam, previousResult, listener):
"""Synchronously or asynchronously search for the given search string.
searchString (str)
searchParam (str)
previousResult (nsIAutoCompleteResult)
listener (nsIAutoCompleteObserver)
The result of the search should be reported via the "listener":
void onSearchResult(in nsIAutoCompleteSearch search, in nsIAutoCompleteResult result);
AutoComplete search best practices:
- If possible limit the search to just the set in
"previousResult". I.e. if the last search was for "fo" and
this one is for "foo", then searching within the previous
result might be faster. If so, then don't need to create a new
nsIAutoCompleteResult: just pare down the "previousResult" and
pass it back.
"""
raise NotImplementedError("virtual base method")
def stopSearch(self):
"""This is sent by the autocomplete controller to stop a
possible previous asynchronous search.
"""
pass
search_param_pats = [
# Matches un-quoted params.
re.compile(r'''(?P<key>[\w-]+):\s*()(?P<name>[^'";]+)\s*;?'''),
# Matches quoted params.
re.compile(r'''(?P<key>[\w-]+):\s*(['"])(?P<name>.*?)(?<!\\)\2\s*;?'''),
]
def parseSearchParam(self, searchParam):
"""Parse the given CSS-like search parameter (i.e. the value of
the 'autocompletesearchparam' attribute of the <textbox> element).
To support more than one piece of data, some TAC searches use a
CSS-like search param.
>>> parseSearchParam("foo: bar")
{'foo': 'bar'}
>>> parseSearchParam("foo-bar: baz qxz; batman: 'pif pow'")
{'batman': 'pif pow', 'foo-bar': 'baz qxz'}
>>> parseSearchParam(r'''robin: 'holy \\'cow\\''; batman: 'pif "pow"';''')
{'batman': 'pif "pow"', 'robin': "holy 'cow'"}
Returns a dict of name/value pairs.
"""
data = {}
for search_param_pat in self.search_param_pats:
            for key, quote, value in search_param_pat.findall(searchParam):
                data[key] = _unescape_quotes(value)
return data
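# Hedged sketch of a concrete subclass (class name, option keys and result handling
# are illustrative; real implementations also set the _com_interfaces_/_reg_* attrs):
#
#   class MyTACSearch(KoTACSearch):
#       def startSearch(self, searchString, searchParam, previousResult, listener):
#           opts = self.parseSearchParam(searchParam)  # e.g. {'max-hits': '10'}
#           result = ...build a koIAutoCompleteResult for searchString...
#           listener.onSearchResult(self, result)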
class KoTACMatch(object):
"""A class representing a single textbox autocomplete hit/match.
These are used by a koIAutoCompleteResult like this:
result = components.classes["@activestate.com/autocomplete/result;1"] \
.createInstance(components.interfaces.koIAutoCompleteResult)
result.init(<search-string>)
for hit in <find-hits>:
result.addMatch(KoTACMatch(...))
See `KoTACResult` docstring (in "koTextboxAutoComplete.py") for details.
"""
#_com_interfaces_ = [components.interfaces.koIAutoCompleteMatch]
#TODO: remove these if not necessary
#_reg_clsid_ = "{5AEEBCBF-6C23-4765-9113-2B3C6D52D44E}"
#_reg_contractid_ = "@activestate.com/autocomplete/match;1"
#_reg_desc_ = "Komodo textbox autocomplete search match"
value = None
comment = None
style = None
isDefault = False
image = None
def __init__(self, value=None, comment=None, style=None, isDefault=False, image=None):
self.value = value
self.comment = comment
self.style = style
self.isDefault = isDefault
self.image = image
#---- internal support routines
def _unescape_quotes(s):
return s.replace("\\'", "'").replace('\\"', '"') | PypiClean |
/EDA-assistant-0.0.4.tar.gz/EDA-assistant-0.0.4/eda_assistant/eda_assistant.py | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from eda_assistant import _create_tables
from eda_assistant import _create_graphs
from eda_assistant import _format_eda_report
import os
import webbrowser
class EDA:
"""
A class representing an instance of EDA. This can be used to create
an eda report.
...
Attributes:
----------
file_name: str
File name of dataset to perform EDA on
Methods:
-------
create_eda_report(save_file_name):
Generates a pdf file containing eda summary statistic
calculation tables and graphs for the dataset. Saves file
as a pdf named save_file_name and opens the report
"""
def __init__(self, file_path_name):
"""
Constructs all necessary attributes for the EDA object.
Parameters:
----------
file_path_name : str
File path name of data set to perform EDA on
"""
if not os.path.exists(file_path_name):
raise Exception('Could not find file. File does not exist')
else:
df = pd.read_csv(file_path_name)
self.df = df
def create_eda_report(self, save_file_name):
"""
Generates a pdf file containing eda summary statistic calculation
tables and graphs for the dataset. Saves eda pdf file to current
working directory named save_file_name. Opens file for user to
preview information.
Parameters:
save_file_name (str): Name of file to save pdf report as.
File name must end in '.pdf'
Returns:
None
"""
if len(self.df) == 0:
raise Exception('DataFrame is empty. Unable to create report.')
else:
with PdfPages(save_file_name) as pdf:
df_summary = _create_tables.create_df_summary(self.df)
df_summary_table = _format_eda_report.\
format_report_df_table(df_summary)
pdf.savefig(df_summary_table, bbox_inches='tight',
pad_inches=2.5)
plt.close()
var_summary = _create_tables.create_var_summary(self.df)
var_summary_table = _format_eda_report.\
format_report_var_table(var_summary)
pdf.savefig(var_summary_table, bbox_inches='tight',
pad_inches=2)
plt.close()
numeric_hist = _create_graphs.plot_numeric_hist(self.df)
if numeric_hist is not None:
pdf.savefig(numeric_hist, bbox_inches='tight',
pad_inches=2.5)
plt.close()
categorical_bar = _create_graphs.plot_categorical_bar(self.df)
if categorical_bar is not None:
pdf.savefig(categorical_bar, bbox_inches='tight',
pad_inches=2.5)
plt.close()
corr_graph = _create_graphs.plot_corr_graph(self.df)
if corr_graph is not None:
pdf.savefig(corr_graph, bbox_inches='tight',
pad_inches=1.5)
plt.close()
pair_graph = _create_graphs.plot_pair_graph(self.df)
if pair_graph is not None:
pdf.savefig(pair_graph, bbox_inches='tight',
pad_inches=1.5)
plt.close()
            save_file_location = os.getcwd() + '/' + save_file_name
            webbrowser.open_new('file://' + save_file_location)
/FLORIS-3.4.1.tar.gz/FLORIS-3.4.1/floris/tools/optimization/legacy/scipy/yaw_wind_rose_clustered.py |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import copy
import numpy as np
import pandas as pd
from ....logging_manager import LoggerBase
from .cluster_turbines import cluster_turbines
from .yaw_wind_rose import YawOptimizationWindRose
class YawOptimizationWindRoseClustered(YawOptimizationWindRose, LoggerBase):
"""
    YawOptimizationWindRoseClustered is a subclass of
    :py:class:`~.tools.optimization.scipy.YawOptimizationWindRose` that is used
    to perform optimizations of the yaw angles of all or a subset of wind
    turbines in a Floris Farm for multiple sets of inflow conditions using the
    scipy optimization package. This class clusters the turbines into separate
    subsets in which the turbines within each subset interact exclusively with
    one another and have no impact on turbines in other clusters. This may
    significantly reduce the computational burden at no loss in performance
    (assuming the turbine clusters are truly independent).
"""
def __init__(
self,
fi,
wd,
ws,
ti=None,
minimum_yaw_angle=0.0,
maximum_yaw_angle=25.0,
minimum_ws=3.0,
maximum_ws=25.0,
yaw_angles_baseline=None,
x0=None,
bnds=None,
opt_method="SLSQP",
opt_options=None,
include_unc=False,
unc_pmfs=None,
unc_options=None,
turbine_weights=None,
verbose=False,
calc_init_power=True,
exclude_downstream_turbines=False,
clustering_wake_slope=0.30,
):
"""
Instantiate YawOptimizationWindRose object with a FlorisInterface object
and assign parameter values.
Args:
fi (:py:class:`~.tools.floris_interface.FlorisInterface`):
Interface used to interact with the Floris object.
wd (iterable) : The wind directions for which the yaw angles are
optimized (deg).
ws (iterable): The wind speeds for which the yaw angles are
optimized (m/s).
ti (iterable, optional): An optional list of turbulence intensity
values for which the yaw angles are optimized. If not
specified, the current TI value in the Floris object will be
used for all optimizations. Defaults to None.
minimum_yaw_angle (float, optional): Minimum constraint on yaw
angle (deg). This value will be ignored if bnds is also
specified. Defaults to 0.0.
maximum_yaw_angle (float, optional): Maximum constraint on yaw
angle (deg). This value will be ignored if bnds is also
specified. Defaults to 25.0.
minimum_ws (float, optional): Minimum wind speed at which
optimization is performed (m/s). Assumes zero power generated
below this value. Defaults to 3.
maximum_ws (float, optional): Maximum wind speed at which
optimization is performed (m/s). Assumes optimal yaw offsets
are zero above this wind speed. Defaults to 25.
yaw_angles_baseline (iterable, optional): The baseline yaw
angles used to calculate the initial and baseline power
production in the wind farm and used to normalize the cost
function. If none are specified, this variable is set equal
to the current yaw angles in floris. Note that this variable
need not meet the yaw constraints specified in self.bnds,
yet a warning is raised if it does to inform the user.
Defaults to None.
x0 (iterable, optional): The initial guess for the optimization
problem. These values must meet the constraints specified
in self.bnds. Note that, if exclude_downstream_turbines=True,
the initial guess for any downstream turbines are ignored
since they are not part of the optimization. Instead, the yaw
angles for those turbines are 0.0 if that meets the lower and
upper bound, or otherwise as close to 0.0 as feasible. If no
values for x0 are specified, x0 is set to be equal to zeros
wherever feasible (w.r.t. the bounds), and equal to the
average of its lower and upper bound for all non-downstream
turbines otherwise. Defaults to None.
bnds (iterable, optional): Bounds for the yaw angles, as tuples of
min, max values for each turbine (deg). One can fix the yaw
angle of certain turbines to a predefined value by setting that
turbine's lower bound equal to its upper bound (i.e., an
equality constraint), as: bnds[ti] = (x, x), where x is the
fixed yaw angle assigned to the turbine. This works for both
zero and nonzero yaw angles. Moreover, if
exclude_downstream_turbines=True, the yaw angles for all
downstream turbines will be 0.0 or a feasible value closest to
0.0. If none are specified, the bounds are set to
(minimum_yaw_angle, maximum_yaw_angle) for each turbine. Note
that, if bnds is not none, its values overwrite any value given
in minimum_yaw_angle and maximum_yaw_angle. Defaults to None.
opt_method (str, optional): The optimization method used by
scipy.optimize.minize. Defaults to 'SLSQP'.
opt_options (dictionary, optional): Optimization options used by
scipy.optimize.minize. If none are specified, they are set to
{'maxiter': 100, 'disp': False, 'iprint': 1, 'ftol': 1e-7,
'eps': 0.01}. Defaults to None.
include_unc (bool, optional): Determines whether wind direction or
yaw uncertainty are included. If True, uncertainty in wind
direction and/or yaw position is included when determining
wind farm power. Uncertainty is included by computing the
mean wind farm power for a distribution of wind direction
and yaw position deviations from the intended wind direction
and yaw angles. Defaults to False.
unc_pmfs (dictionary, optional): A dictionary containing
probability mass functions describing the distribution of
wind direction and yaw position deviations when wind direction
and/or yaw position uncertainty is included in the power
calculations. Contains the following key-value pairs:
- **wd_unc** (*np.array*): The wind direction
deviations from the intended wind direction (deg).
- **wd_unc_pmf** (*np.array*): The probability
                    of each wind direction deviation in **wd_unc** occurring.
- **yaw_unc** (*np.array*): The yaw angle deviations
from the intended yaw angles (deg).
- **yaw_unc_pmf** (*np.array*): The probability
                    of each yaw angle deviation in **yaw_unc** occurring.
If none are specified, default PMFs are calculated using
values provided in **unc_options**. Defaults to None.
unc_options (dictionary, optional): A dictionary containing values
used to create normally-distributed, zero-mean probability mass
functions describing the distribution of wind direction and yaw
position deviations when wind direction and/or yaw position
uncertainty is included. This argument is only used when
**unc_pmfs** is None and contains the following key-value pairs:
- **std_wd** (*float*): The standard deviation of
the wind direction deviations from the original wind
direction (deg).
- **std_yaw** (*float*): The standard deviation of
the yaw angle deviations from the original yaw angles (deg).
- **pmf_res** (*float*): The resolution in degrees
of the wind direction and yaw angle PMFs.
- **pdf_cutoff** (*float*): The cumulative
distribution function value at which the tails of the
PMFs are truncated.
If none are specified, default values of
{'std_wd': 4.95, 'std_yaw': 1.75, 'pmf_res': 1.0,
'pdf_cutoff': 0.995} are used. Defaults to None.
turbine_weights (iterable, optional): weighing terms that allow
the user to emphasize power gains at particular turbines or
completely ignore power gains from other turbines. The array
of turbine powers from floris is multiplied with this array
in the calculation of the objective function. If None, this
is an array with all values 1.0 and length equal to the
number of turbines. Defaults to None.
calc_init_power (bool, optional): If True, calculates initial
wind farm power for each set of wind conditions. Defaults to
True.
exclude_downstream_turbines (bool, optional): If True,
automatically finds and excludes turbines that are most
downstream from the optimization problem. This significantly
reduces computation time at no loss in performance. The yaw
angles of these downstream turbines are fixed to 0.0 deg if
the yaw bounds specified in self.bnds allow that, or otherwise
are fixed to the lower or upper yaw bound, whichever is closer
to 0.0. Defaults to False.
clustering_wake_slope (float, optional): linear slope of the wake
in the simplified linear expansion wake model (dy/dx). This
model is used to derive wake interactions between turbines and
to identify the turbine clusters. A good value is about equal
to the turbulence intensity in FLORIS. Though, since yaw
optimizations may shift the wake laterally, a safer option
is twice the turbulence intensity. The default value is 0.30
which should be valid for yaw optimizations at wd_std = 0.0 deg
and turbulence intensities up to 15%. Defaults to 0.30.
"""
super().__init__(
fi=fi,
wd=wd,
ws=ws,
ti=ti,
minimum_yaw_angle=minimum_yaw_angle,
maximum_yaw_angle=maximum_yaw_angle,
minimum_ws=minimum_ws,
maximum_ws=maximum_ws,
yaw_angles_baseline=yaw_angles_baseline,
x0=x0,
bnds=bnds,
opt_method=opt_method,
opt_options=opt_options,
include_unc=include_unc,
unc_pmfs=unc_pmfs,
unc_options=unc_options,
turbine_weights=turbine_weights,
verbose=verbose,
calc_init_power=calc_init_power,
exclude_downstream_turbines=exclude_downstream_turbines,
)
self.clustering_wake_slope = clustering_wake_slope
def _cluster_turbines(self):
wind_directions = self.fi.floris.farm.wind_direction
if (np.std(wind_directions) > 0.001):
raise ValueError("Wind directions must be uniform for clustering algorithm.")
self.clusters = cluster_turbines(
fi=self.fi,
wind_direction=self.fi.floris.farm.wind_direction[0],
wake_slope=self.clustering_wake_slope
)
def plot_clusters(self):
for wd in self.wd:
cluster_turbines(
fi=self.fi,
wind_direction=wd,
wake_slope=self.clustering_wake_slope,
plot_lines=True
)
def optimize(self):
"""
This method solves for the optimum turbine yaw angles for power
production and the resulting power produced by the wind farm for a
series of wind speed, wind direction, and optionally TI combinations.
Returns:
pandas.DataFrame: A pandas DataFrame with the same number of rows
as the length of the wd and ws arrays, containing the following
columns:
- **ws** (*float*) - The wind speed values for which the yaw
angles are optimized and power is computed (m/s).
- **wd** (*float*) - The wind direction values for which the
yaw angles are optimized and power is computed (deg).
- **ti** (*float*) - The turbulence intensity values for which
the yaw angles are optimized and power is computed. Only
included if self.ti is not None.
- **power_opt** (*float*) - The total power produced by the
wind farm with optimal yaw offsets (W).
- **turbine_power_opt** (*list* (*float*)) - A list
containing the power produced by each wind turbine with optimal
yaw offsets (W).
- **yaw_angles** (*list* (*float*)) - A list containing
the optimal yaw offsets for maximizing total wind farm power
for each wind turbine (deg).
"""
print("=====================================================")
print("Optimizing wake redirection control...")
print("Number of wind conditions to optimize = ", len(self.wd))
print("Number of yaw angles to optimize = ", len(self.turbs_to_opt))
print("=====================================================")
df_opt = pd.DataFrame()
for i in range(len(self.wd)):
if self.verbose:
if self.ti is None:
print(
"Computing wind speed, wind direction pair "
+ str(i)
+ " out of "
+ str(len(self.wd))
+ ": wind speed = "
+ str(self.ws[i])
+ " m/s, wind direction = "
+ str(self.wd[i])
+ " deg."
)
else:
print(
"Computing wind speed, wind direction, turbulence "
+ "intensity set "
+ str(i)
+ " out of "
+ str(len(self.wd))
+ ": wind speed = "
+ str(self.ws[i])
+ " m/s, wind direction = "
+ str(self.wd[i])
+ " deg, turbulence intensity = "
+ str(self.ti[i])
+ "."
)
# Optimizing wake redirection control
if (self.ws[i] >= self.minimum_ws) & (self.ws[i] <= self.maximum_ws):
if self.ti is None:
self.fi.reinitialize_flow_field(
wind_direction=[self.wd[i]], wind_speed=[self.ws[i]]
)
else:
self.fi.reinitialize_flow_field(
wind_direction=[self.wd[i]],
wind_speed=[self.ws[i]],
turbulence_intensity=self.ti[i],
)
# Set initial farm power
self.initial_farm_power = self.initial_farm_powers[i]
# Determine clusters and then optimize by cluster
self._cluster_turbines()
if self.verbose:
print("Clustered turbines into %d separate clusters." % len(self.clusters))
# Save parameters to a full list
yaw_angles_template_full = copy.copy(self.yaw_angles_template)
yaw_angles_baseline_full = copy.copy(self.yaw_angles_baseline)
turbine_weights_full = copy.copy(self.turbine_weights)
bnds_full = copy.copy(self.bnds)
# nturbs_full = copy.copy(self.nturbs)
x0_full = copy.copy(self.x0)
fi_full = copy.deepcopy(self.fi)
# Overwrite parameters for each cluster and optimize
opt_yaw_angles = np.zeros_like(x0_full)
for ci, cl in enumerate(self.clusters):
if self.verbose:
print("=====================================================")
print("Optimizing %d parameters in cluster %d." % (len(cl), ci))
print("=====================================================")
self.yaw_angles_template = np.array(yaw_angles_template_full)[cl]
self.yaw_angles_baseline = np.array(yaw_angles_baseline_full)[cl]
self.turbine_weights = np.array(turbine_weights_full)[cl]
self.bnds = np.array(bnds_full)[cl]
self.x0 = np.array(x0_full)[cl]
self.fi = copy.deepcopy(fi_full)
self.fi.reinitialize_flow_field(
layout_array=[
np.array(fi_full.layout_x)[cl],
np.array(fi_full.layout_y)[cl]
]
)
opt_yaw_angles[cl] = self._optimize()
# Restore parameters
self.yaw_angles_template = yaw_angles_template_full
self.yaw_angles_baseline = yaw_angles_baseline_full
self.turbine_weights = turbine_weights_full
self.bnds = bnds_full
self.x0 = x0_full
self.fi = fi_full
self.fi.reinitialize_flow_field(
layout_array=[
np.array(fi_full.layout_x),
np.array(fi_full.layout_y)
]
)
if np.sum(np.abs(opt_yaw_angles)) == 0:
print(
"No change in controls suggested for this inflow \
condition..."
)
# optimized power
self.fi.calculate_wake(yaw_angles=opt_yaw_angles)
power_opt = self.fi.get_turbine_power(
include_unc=self.include_unc,
unc_pmfs=self.unc_pmfs,
unc_options=self.unc_options,
)
elif self.ws[i] >= self.maximum_ws:
print(
"No change in controls suggested for this inflow \
condition..."
)
if self.ti is None:
self.fi.reinitialize_flow_field(
wind_direction=[self.wd[i]], wind_speed=[self.ws[i]]
)
else:
self.fi.reinitialize_flow_field(
wind_direction=[self.wd[i]],
wind_speed=[self.ws[i]],
turbulence_intensity=self.ti[i],
)
opt_yaw_angles = np.array(self.yaw_angles_template, copy=True)
self.fi.calculate_wake(yaw_angles=opt_yaw_angles)
power_opt = self.fi.get_turbine_power(
include_unc=self.include_unc,
unc_pmfs=self.unc_pmfs,
unc_options=self.unc_options,
)
else:
print(
"No change in controls suggested for this inflow \
condition..."
)
opt_yaw_angles = np.array(self.yaw_angles_template, copy=True)
power_opt = self.nturbs * [0.0]
# Include turbine weighing terms
power_opt = np.multiply(self.turbine_weights, power_opt)
# add variables to dataframe
if self.ti is None:
df_opt = df_opt.append(
pd.DataFrame(
{
"ws": [self.ws[i]],
"wd": [self.wd[i]],
"power_opt": [np.sum(power_opt)],
"turbine_power_opt": [power_opt],
"yaw_angles": [opt_yaw_angles],
}
)
)
else:
df_opt = df_opt.append(
pd.DataFrame(
{
"ws": [self.ws[i]],
"wd": [self.wd[i]],
"ti": [self.ti[i]],
"power_opt": [np.sum(power_opt)],
"turbine_power_opt": [power_opt],
"yaw_angles": [opt_yaw_angles],
}
)
)
df_opt.reset_index(drop=True, inplace=True)
return df_opt | PypiClean |
/Js2Py-0.74.tar.gz/Js2Py-0.74/js2py/internals/constructors/jsdate.py | from ..base import *
from .time_helpers import *
TZ_OFFSET = (time.altzone // 3600)
ABS_OFFSET = abs(TZ_OFFSET)
TZ_NAME = time.tzname[1]
ISO_FORMAT = '%s-%s-%sT%s:%s:%s.%sZ'
@Js
def Date(year, month, date, hours, minutes, seconds, ms):
return now().to_string()
Date.Class = 'Date'
def now():
return PyJsDate(int(time.time() * 1000), prototype=DatePrototype)
@Js
def UTC(year, month, date, hours, minutes, seconds, ms): # todo complete this
args = arguments
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l > 2 else Js(1)
h = args[3].to_number() if l > 3 else Js(0)
mi = args[4].to_number() if l > 4 else Js(0)
sec = args[5].to_number() if l > 5 else Js(0)
mili = args[6].to_number() if l > 6 else Js(0)
if not y.is_nan() and 0 <= y.value <= 99:
y = y + Js(1900)
return TimeClip(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili)))
@Js
def parse(string):
return PyJsDate(
TimeClip(parse_date(string.to_string().value)),
prototype=DatePrototype)
Date.define_own_property('now', {
'value': Js(now),
'enumerable': False,
'writable': True,
'configurable': True
})
Date.define_own_property('parse', {
'value': parse,
'enumerable': False,
'writable': True,
'configurable': True
})
Date.define_own_property('UTC', {
'value': UTC,
'enumerable': False,
'writable': True,
'configurable': True
})
class PyJsDate(PyJs):
Class = 'Date'
extensible = True
def __init__(self, value, prototype=None):
self.value = value
self.own = {}
self.prototype = prototype
# todo fix this problematic datetime part
def to_local_dt(self):
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=UTCToLocal(self.value) // 1000)
def to_utc_dt(self):
return datetime.datetime(1970, 1, 1) + datetime.timedelta(
seconds=self.value // 1000)
def local_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_local_dt()
except:
raise MakeError(
'TypeError',
'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError(
'TypeError',
'Could not generate date string from this date (limitations of python.datetime)'
)
def utc_strftime(self, pattern):
if self.value is NaN:
return 'Invalid Date'
try:
dt = self.to_utc_dt()
except:
raise MakeError(
'TypeError',
'unsupported date range. Will fix in future versions')
try:
return dt.strftime(pattern)
except:
raise MakeError(
'TypeError',
'Could not generate date string from this date (limitations of python.datetime)'
)
def parse_date(py_string): # todo support all date string formats
try:
try:
dt = datetime.datetime.strptime(py_string, "%Y-%m-%dT%H:%M:%S.%fZ")
except:
dt = datetime.datetime.strptime(py_string, "%Y-%m-%dT%H:%M:%SZ")
return MakeDate(
MakeDay(Js(dt.year), Js(dt.month - 1), Js(dt.day)),
MakeTime(
Js(dt.hour), Js(dt.minute), Js(dt.second),
Js(dt.microsecond // 1000)))
except:
raise MakeError(
'TypeError',
'Could not parse date %s - unsupported date format. Currently only supported format is RFC3339 utc. Sorry!'
% py_string)
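# Worked examples (hedged): only the two UTC layouts matched above are accepted.
#   parse_date('2021-03-01T12:30:05.250Z')  # with milliseconds
#   parse_date('2021-03-01T12:30:05Z')      # without milliseconds
# Anything else (timezone offsets, date-only strings) raises the TypeError above.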
def date_constructor(*args):
if len(args) >= 2:
return date_constructor2(*args)
elif len(args) == 1:
return date_constructor1(args[0])
else:
return date_constructor0()
def date_constructor0():
return now()
def date_constructor1(value):
v = value.to_primitive()
if v._type() == 'String':
v = parse_date(v.value)
else:
v = v.to_int()
return PyJsDate(TimeClip(v), prototype=DatePrototype)
def date_constructor2(*args):
y = args[0].to_number()
m = args[1].to_number()
l = len(args)
dt = args[2].to_number() if l > 2 else Js(1)
h = args[3].to_number() if l > 3 else Js(0)
mi = args[4].to_number() if l > 4 else Js(0)
sec = args[5].to_number() if l > 5 else Js(0)
mili = args[6].to_number() if l > 6 else Js(0)
if not y.is_nan() and 0 <= y.value <= 99:
y = y + Js(1900)
t = TimeClip(
LocalToUTC(MakeDate(MakeDay(y, m, dt), MakeTime(h, mi, sec, mili))))
return PyJsDate(t, prototype=DatePrototype)
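# Hedged sketch of the three dispatch arities below (arguments shown as the wrapped
# Js values this module passes around):
#   date_constructor()                            -> current time, like `new Date()`
#   date_constructor(Js('2020-01-15T00:00:00Z'))  -> parsed from an RFC3339 string
#   date_constructor(Js(2020), Js(0), Js(15))     -> local year/month/day fields
# Each branch returns a PyJsDate holding milliseconds since the Unix epoch.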
Date.create = date_constructor
DatePrototype = PyJsDate(float('nan'), prototype=ObjectPrototype)
def check_date(obj):
if obj.Class != 'Date':
raise MakeError('TypeError', 'this is not a Date object')
class DateProto:
def toString():
check_date(this)
if this.value is NaN:
return 'Invalid Date'
offset = (UTCToLocal(this.value) - this.value) // msPerHour
return this.local_strftime(
'%a %b %d %Y %H:%M:%S GMT') + '%s00 (%s)' % (pad(
offset, 2, True), GetTimeZoneName(this.value))
def toDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def toLocaleString():
check_date(this)
return this.local_strftime('%d %B %Y %H:%M:%S')
def toLocaleDateString():
check_date(this)
return this.local_strftime('%d %B %Y')
def toLocaleTimeString():
check_date(this)
return this.local_strftime('%H:%M:%S')
def valueOf():
check_date(this)
return this.value
def getTime():
check_date(this)
return this.value
def getFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(UTCToLocal(this.value))
def getUTCFullYear():
check_date(this)
if this.value is NaN:
return NaN
return YearFromTime(this.value)
def getMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(UTCToLocal(this.value))
def getDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(UTCToLocal(this.value))
def getUTCMonth():
check_date(this)
if this.value is NaN:
return NaN
return MonthFromTime(this.value)
def getUTCDate():
check_date(this)
if this.value is NaN:
return NaN
return DateFromTime(this.value)
def getDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(UTCToLocal(this.value))
def getUTCDay():
check_date(this)
if this.value is NaN:
return NaN
return WeekDay(this.value)
def getHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(UTCToLocal(this.value))
def getUTCHours():
check_date(this)
if this.value is NaN:
return NaN
return HourFromTime(this.value)
def getMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(UTCToLocal(this.value))
def getUTCMinutes():
check_date(this)
if this.value is NaN:
return NaN
return MinFromTime(this.value)
def getSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(UTCToLocal(this.value))
def getUTCSeconds():
check_date(this)
if this.value is NaN:
return NaN
return SecFromTime(this.value)
def getMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(UTCToLocal(this.value))
def getUTCMilliseconds():
check_date(this)
if this.value is NaN:
return NaN
return msFromTime(this.value)
def getTimezoneOffset():
check_date(this)
if this.value is NaN:
return NaN
return (this.value - UTCToLocal(this.value)) // 60000
def setTime(time):
check_date(this)
this.value = TimeClip(time.to_number().to_int())
return this.value
def setMilliseconds(ms):
check_date(this)
t = UTCToLocal(this.value)
tim = MakeTime(
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(LocalToUTC(MakeDate(Day(t), tim)))
this.value = u
return u
def setUTCMilliseconds(ms):
check_date(this)
t = this.value
tim = MakeTime(
Js(HourFromTime(t)), Js(MinFromTime(t)), Js(SecFromTime(t)), ms)
u = TimeClip(MakeDate(Day(t), tim))
this.value = u
return u
def setSeconds(sec, ms=None):
check_date(this)
t = UTCToLocal(this.value)
s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCSeconds(sec, ms=None):
check_date(this)
t = this.value
s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(
Day(t), MakeTime(Js(HourFromTime(t)), Js(MinFromTime(t)), s, milli))
v = TimeClip(date)
this.value = v
return v
def setMinutes(min, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
m = min.to_number()
        if sec is None: s = Js(SecFromTime(t))
        else: s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCMinutes(min, sec=None, ms=None):
check_date(this)
t = this.value
m = min.to_number()
        if sec is None: s = Js(SecFromTime(t))
        else: s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(Js(HourFromTime(t)), m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = UTCToLocal(this.value)
h = hour.to_number()
        if min is None: m = Js(MinFromTime(t))
        else: m = min.to_number()
        if sec is None: s = Js(SecFromTime(t))
        else: s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
u = TimeClip(LocalToUTC(date))
this.value = u
return u
def setUTCHours(hour, min=None, sec=None, ms=None):
check_date(this)
t = this.value
h = hour.to_number()
        if min is None: m = Js(MinFromTime(t))
        else: m = min.to_number()
        if sec is None: s = Js(SecFromTime(t))
        else: s = sec.to_number()
        if ms is None: milli = Js(msFromTime(t))
        else: milli = ms.to_number()
date = MakeDate(Day(t), MakeTime(h, m, s, milli))
v = TimeClip(date)
this.value = v
return v
def setDate(date):
check_date(this)
t = UTCToLocal(this.value)
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCDate(date):
check_date(this)
t = this.value
dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), Js(MonthFromTime(t)), dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setMonth(month, date=None):
check_date(this)
t = UTCToLocal(this.value)
m = month.to_number()
        if date is None: dt = Js(DateFromTime(t))
        else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCMonth(month, date=None):
check_date(this)
t = this.value
m = month.to_number()
        if date is None: dt = Js(DateFromTime(t))
        else: dt = date.to_number()
newDate = MakeDate(
MakeDay(Js(YearFromTime(t)), m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def setFullYear(year, month=None, date=None):
check_date(this)
if not this.value is NaN: t = UTCToLocal(this.value)
else: t = 0
y = year.to_number()
        if month is None: m = Js(MonthFromTime(t))
        else: m = month.to_number()
        if date is None: dt = Js(DateFromTime(t))
        else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
u = TimeClip(LocalToUTC(newDate))
this.value = u
return u
def setUTCFullYear(year, month=None, date=None):
check_date(this)
        if not this.value is NaN: t = this.value
else: t = 0
y = year.to_number()
        if month is None: m = Js(MonthFromTime(t))
        else: m = month.to_number()
        if date is None: dt = Js(DateFromTime(t))
        else: dt = date.to_number()
newDate = MakeDate(
MakeDay(y, m, dt), TimeWithinDay(t))
v = TimeClip(newDate)
this.value = v
return v
def toUTCString():
check_date(this)
return this.utc_strftime('%d %B %Y %H:%M:%S')
def toISOString():
check_date(this)
t = this.value
year = YearFromTime(t)
month, day, hour, minute, second, milli = pad(
MonthFromTime(t) + 1), pad(DateFromTime(t)), pad(
HourFromTime(t)), pad(MinFromTime(t)), pad(
SecFromTime(t)), pad(msFromTime(t))
return ISO_FORMAT % (unicode(year) if 0 <= year <= 9999 else pad(
year, 6, True), month, day, hour, minute, second, milli)
def toJSON(key):
o = this.to_object()
tv = o.to_primitive('Number')
if tv.Class == 'Number' and not tv.is_finite():
return this.null
toISO = o.get('toISOString')
if not toISO.is_callable():
raise this.MakeError('TypeError', 'toISOString is not callable')
return toISO.call(o, ())
def pad(num, n=2, sign=False):
'''returns n digit string representation of the num'''
s = unicode(abs(num))
if len(s) < n:
s = '0' * (n - len(s)) + s
if not sign:
return s
if num >= 0:
return '+' + s
else:
return '-' + s
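# Worked examples for pad():
#   pad(7)           -> '07'
#   pad(7, 3)        -> '007'
#   pad(7, 3, True)  -> '+007'
#   pad(-7, 3, True) -> '-007'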
fill_prototype(DatePrototype, DateProto, default_attrs)
Date.define_own_property(
'prototype', {
'value': DatePrototype,
'enumerable': False,
'writable': False,
'configurable': False
})
DatePrototype.define_own_property('constructor', {
'value': Date,
'enumerable': False,
'writable': True,
'configurable': True
}) | PypiClean |
/LAMDA-SSL-1.0.2.tar.gz/LAMDA-SSL-1.0.2/LAMDA_SSL/Config/SSVAE.py | import torch.nn as nn
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Transform.ToImage import ToImage
from LAMDA_SSL.Transform.ToTensor import ToTensor
transforms = None
target_transform = None
pre_transform = ToImage(channels=1,channels_first=False)
transform = ToTensor(dtype='float',image=True)
unlabeled_transform = ToTensor(dtype='float',image=True)
test_transform = ToTensor(dtype='float',image=True)
valid_transform = ToTensor(dtype='float',image=True)
train_dataset=None
labeled_dataset=LabeledDataset(pre_transform=pre_transform,transforms=transforms,
transform=transform,target_transform=target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=valid_transform)
test_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=test_transform)
#dataloader
train_dataloader=None
labeled_dataloader=LabeledDataLoader(batch_size=100,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
# Batch sampler
train_batch_sampler=None
labeled_batch_sampler=None
unlabeled_batch_sampler=None
valid_batch_sampler=None
test_batch_sampler=None
# sampler
train_sampler=None
labeled_sampler=RandomSampler(replacement=True,num_samples=100*540)
unlabeled_sampler=RandomSampler(replacement=False)
test_sampler=SequentialSampler()
valid_sampler=SequentialSampler()
# optimizer
optimizer=Adam(lr=0.02)
# scheduler
scheduler=None
# network
network=None
# evaluation
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
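# Hedged note: this module only bundles default components and hyper-parameters for
# the SSVAE algorithm (datasets, dataloaders, samplers, optimizer, evaluation
# metrics, training lengths). A typical (assumed) pattern is to import the names
# defined here and forward them to the SSVAE model constructor, e.g.
#
#   from LAMDA_SSL.Algorithm.Classification.SSVAE import SSVAE  # assumed import path
#   model = SSVAE(labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset,
#                 labeled_dataloader=labeled_dataloader, unlabeled_dataloader=unlabeled_dataloader,
#                 labeled_sampler=labeled_sampler, unlabeled_sampler=unlabeled_sampler,
#                 optimizer=optimizer, evaluation=evaluation, epoch=epoch, device=device)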
mu=1
weight_decay=0
ema_decay=None
epoch=500
num_it_epoch=540
num_it_total=540*500
eval_it=None
eval_epoch=None
device='cpu'
parallel=None
file=None
verbose=False
alpha=5
num_labeled=None
dim_in=None
num_classes=None
dim_z=100
dim_hidden_de=[500, 500]
dim_hidden_en_y=[500, 500]
dim_hidden_en_z=[500, 500]
activations_de=[nn.Softplus(), nn.Softplus()]
activations_en_y=[nn.Softplus(), nn.Softplus()]
activations_en_z=[nn.Softplus(), nn.Softplus()] | PypiClean |
/Flask-KQMaps-0.4.2.tar.gz/Flask-KQMaps-0.4.2/flask_kqmaps/static/kqwebclient/leaflet/include-leaflet.js | (function () {
    var r = new RegExp("(^|(.*?\\/))(include-leaflet\\.js)(\\?|$)"),
s = document.getElementsByTagName('script'), targetScript;
for (var i = 0; i < s.length; i++) {
var src = s[i].getAttribute('src');
if (src) {
var m = src.match(r);
if (m) {
targetScript = s[i];
break;
}
}
}
function inputScript(url) {
var script = '<script type="text/javascript" src="' + url + '"><' + '/script>';
document.writeln(script);
}
function inputCSS(url) {
var css = '<link rel="stylesheet" href="' + url + '">';
document.writeln(css);
}
function inArray(arr, item) {
        for (var i in arr) {
if (String(arr[i]).trim() == item) {
return true;
}
}
return false;
}
function supportES6() {
var code = "'use strict'; class Foo {}; class Bar extends Foo {};";
try {
(new Function(code))();
} catch (err) {
return false;
}
if (!Array.from) {
return false;
}
return true;
}
    // Load the library resource files
function load() {
var includes = (targetScript.getAttribute('include') || "").split(",");
var excludes = (targetScript.getAttribute('exclude') || "").split(",");
// for import shape
// document.writeln('<button id="kqImportShapeUploadBtn" type="button"></button>');
if (!inArray(excludes, 'jquery')) {
inputScript(window.lib_path + "/jquery/jquery.min.js");
}
if (!inArray(excludes, 'leaflet')) {
inputCSS(window.lib_path + "/leaflet/leaflet.css");
inputScript(window.lib_path + "/leaflet/leaflet.js");
}
if (!inArray(excludes, 'proj4js')) {
inputScript(window.lib_path + "/proj4js/proj4.js");
}
if (!inArray(excludes, 'iconfont')) {
inputCSS(window.lib_path + "/iconfont/iconfont.css");
inputScript(window.lib_path + "/iconfont/iconfont.js");
}
if (inArray(includes, 'mapv')) {
inputScript(window.lib_path + "/mapv/mapv.min.js");
}
if (inArray(includes, 'turf')) {
inputScript(window.lib_path + "/Turf/turf.min.js");
}
if (inArray(includes, 'echarts')) {
inputScript(window.lib_path + "/echarts/echarts.min.js");
}
if (inArray(includes, 'leaflet.heatmap')) {
inputScript(window.lib_path + "/leaflet-heat/leaflet-heat.js");
}
if (inArray(includes, 'leaflet.osmbuildings')) {
inputScript(window.lib_path + "/leaflet.osmbuildings/OSMBuildings-Leaflet.debug.js");
}
if (inArray(includes, 'leaflet.markercluster')) {
inputCSS(window.lib_path + "/leaflet.markercluster/MarkerCluster.Default.css");
inputCSS(window.lib_path + "/leaflet.markercluster/MarkerCluster.css");
inputScript(window.lib_path + "/leaflet.markercluster/leaflet.markercluster.js");
}
if (inArray(includes, 'leaflet.icon.pulse')) {
inputCSS(window.lib_path + "/leaflet-icon-pulse/L.Icon.Pulse.css");
inputScript(window.lib_path + "/leaflet-icon-pulse/L.Icon.Pulse.js");
}
if (inArray(includes, 'leaflet.draw')) {
inputCSS(window.lib_path + "/leaflet.draw/leaflet.draw.css");
inputScript(window.lib_path + "/leaflet.draw/leaflet.draw.js");
}
if (inArray(includes, 'leaflet.pm')) {
inputCSS(window.lib_path + "/leaflet.pm/leaflet.pm.css");
inputScript(window.lib_path + "/leaflet.pm/leaflet.pm.min.js");
}
if (inArray(includes, 'leaflet.minimap')) {
inputCSS(window.lib_path + "/leaflet.minimap/Control.MiniMap.min.css");
inputScript(window.lib_path + "/leaflet.minimap/Control.MiniMap.min.js");
}
if (inArray(includes, 'leaflet.sidebyside')) {
inputScript(window.lib_path + "/leaflet-side-by-side/leaflet-side-by-side.min.js");
}
if (inArray(includes, 'leaflet.easybutton')) {
inputCSS(window.lib_path + "/leaflet.easybutton/easy-button.css");
inputScript(window.lib_path + "/leaflet.easybutton/easy-button.js");
}
if (inArray(includes, 'fontawesome')) {
inputCSS(window.lib_path + "/font-awesome/css/font-awesome.css");
}
if (inArray(includes, 'papaparse')) {
inputScript(window.lib_path + "/papaparse/papaparse.min.js");
}
if (inArray(includes, 'colorfilter')) {
inputScript(window.lib_path + "/leaflet-tilelayer-colorfilter/leaflet-tilelayer-colorfilter.min.js");
}
if (inArray(includes, 'extramarkers')) {
inputCSS(window.lib_path + "/Leaflet.ExtraMarkers/css/leaflet.extra-markers.min.css");
inputScript(window.lib_path + "/Leaflet.ExtraMarkers/js/leaflet.extra-markers.min.js");
}
if (inArray(includes, 'snakeanim')) {
inputScript(window.lib_path + "/snakeanim/L.Polyline.SnakeAnim.js");
}
if (inArray(includes, 'antpath')) {
inputScript(window.lib_path + "/leaflet-ant-path/leaflet-ant-path.js");
}
if (inArray(includes, 'mapbox-gl')) {
inputCSS(window.lib_path + "/mapbox-gl/mapbox-gl.css");
inputScript(window.lib_path + "/mapbox-gl/mapbox-gl.js");
}
if (inArray(includes, 'imagetransform')) {
inputScript(window.lib_path + "/Leaflet.imageTransform/L.ImageTransform.js");
}
if (inArray(includes, "TileLayer.Grayscale")) {
inputScript(window.lib_path + "/TileLayer.Grayscale/TileLayer.Grayscale.js")
}
if (inArray(includes, 'marker-highight')) {
inputCSS(window.lib_path + "/leaflet.marker.highlight/leaflet.marker.highlight.css");
inputScript(window.lib_path + "/leaflet.marker.highlight/leaflet.marker.highlight.js");
}
if (inArray(includes, 'mousecoordinate')) {
inputCSS(window.lib_path + "/leaflet.mousecoordinate/leaflet.mousecoordinate.css");
inputScript(window.lib_path + "/leaflet.mousecoordinate/leaflet.mousecoordinate.js");
}
if (inArray(includes, 'custom-layer')) {
inputScript(window.lib_path + "/Leaflet.CustomLayer/Leaflet.CustomLayer.js");
}
if (inArray(includes, "canvas-markers")) {
inputScript(window.lib_path + "/Leaflet.Canvas-Markers/leaflet.canvas-markers.js")
}
if (inArray(includes, "magic-marker")) {
inputCSS(window.lib_path + "/leaflet.magicMarker/magic.min.css");
inputCSS(window.lib_path + "/leaflet.magicMarker/leaflet.magicMarker.css");
inputScript(window.lib_path + "/leaflet.magicMarker/leaflet.magicMarker.js");
}
if (inArray(includes, "label-text-collision")) {
inputScript(window.lib_path + "/Leaflet.LabelTextCollision/L.LabelTextCollision.js");
}
if (inArray(includes, "smooth-marker-bouncing")) {
inputScript(window.lib_path + "/Leaflet.SmoothMarkerBouncing/leaflet.smoothmarkerbouncing.js");
}
if (inArray(includes, "ant-path")) {
inputScript(window.lib_path + "/Leaflet.AntPath/leaflet-ant-path.js");
}
if (inArray(includes, "marker-slide")) {
inputScript(window.lib_path + "/Leaflet.Marker.SlideTo/Leaflet.Marker.SlideTo.js");
}
if (inArray(includes, "motion")) {
inputScript(window.lib_path + "/leaflet.motion/leaflet.motion.js");
}
if (inArray(includes, "curve")) {
inputScript(window.lib_path + "/Leaflet.curve/leaflet.curve.js");
}
if (inArray(includes, "hotline")) {
inputScript(window.lib_path + "/Leaflet.hotline/leaflet.hotline.js")
}
if (inArray(includes, "arrow-heads")) {
inputScript(window.lib_path + "/Leaflet-arrowheads/leaflet-geometryutil.js");
inputScript(window.lib_path + "/Leaflet-arrowheads/leaflet-arrowheads.js");
}
if (inArray(includes, "fillPattern")) {
inputScript(window.lib_path + "/leaflet-polygon.fillPattern/leaflet-polygon.fillPattern.js");
}
if (inArray(includes, "semicircle")) {
inputScript(window.lib_path + "/Leaflet-semicircle/Semicircle.js");
}
if (inArray(includes, "ellipse")) {
inputScript(window.lib_path + "/Leaflet.ellipse/l.ellipse.js");
}
if (inArray(includes, "mask-canvas")) {
inputScript(window.lib_path + "/Leaflet.MaskCanvas/QuadTree.js");
inputScript(window.lib_path + "/Leaflet.MaskCanvas/L.GridLayer.MaskCanvas.js");
}
if (inArray(includes, "canvas-flow-map")) {
inputScript(window.lib_path + "/Leaflet.Canvas-Flowmap-Layer/CanvasFlowmapLayer.js");
}
if (inArray(includes, "river")) {
inputScript(window.lib_path + "/Leaflet.River/Leaflet.river.js");
}
        // Load the libraries required for vector tiles
if (inArray(includes, "vector-tile")) {
inputScript(window.lib_path + "/vector-tile/jquery.min.js");
inputScript(window.lib_path + "/vector-tile/jquery-ui.min.js");
inputScript(window.lib_path + "/vector-tile/kqlibs.js");
inputScript(window.lib_path + "/vector-tile/map-example_3.js");
inputScript(window.lib_path + "/vector-tile/other.js");
inputScript(window.lib_path + "/vector-tile/jquery.touchSwipe.js");
inputScript(window.lib_path + "/vector-tile/kqcss-black-black.js");
inputCSS(window.lib_path + "/vector-tile/kqvt.min.css");
inputScript(window.lib_path + "/vector-tile/kqvt.min.js");
inputCSS("//cdn.jsdelivr.net/npm/[email protected]/dist/skin-win8/ui.fancytree.min.css");
inputCSS("//cdn.jsdelivr.net/npm/[email protected]/dist/skin-win8/ui.fancytree.min.css");
inputScript("//cdn.jsdelivr.net/npm/[email protected]/dist/jquery.fancytree-all-deps.min.js");
inputCSS(window.lib_path + "/font-awesome/css/font-awesome.min.css");
inputCSS(window.lib_path + "/mapbox-gl/mapbox-gl.min.css");
inputScript(window.lib_path + "/mapbox-gl/mapbox-gl.js");
}
if (inArray(includes, "realtime")) {
inputScript(window.lib_path + "/leaflet-realtime/leaflet-realtime.min.js");
}
}
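    // Hedged usage example (the src path is illustrative): the "include" and
    // "exclude" attributes on the loading <script> tag decide which bundled
    // libraries are pulled in, e.g.
    //
    //   <script src="kqwebclient/leaflet/include-leaflet.js"
    //           include="turf,echarts,leaflet.markercluster"
    //           exclude="proj4js"></script>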
load();
})(); | PypiClean |