metadata (dict) | text (string, 60 to 3.49M chars)
---|---
{
"source": "jmalvarezf-lmes/halyard",
"score": 2
} |
#### File: client/startup/mount-config.py
```python
import argparse
import base64
import json
import logging
import subprocess
import os
import grp
import pwd
import time
def mkdirs(path):
subprocess.check_call(["mkdir", "-p", path])
def subprocess_retry(supplier, retries, process_cmd):
if retries < 0:
logging.fatal("All retries of " + process_cmd + " attempted")
raise subprocess.CalledProcessError(1, process_cmd)
try:
return supplier()
except subprocess.CalledProcessError as err:
logging.warning("Subprocess failed " + str(err))
time.sleep(5)
return subprocess_retry(supplier, retries - 1, process_cmd)
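# Illustrative sketch (not part of the original script): subprocess_retry wraps a
# subprocess call in retry logic, sleeping 5 seconds between attempts. A hypothetical
# call that retries `vault status` up to three times would look like:
#   subprocess_retry(lambda: subprocess.check_call(["vault", "status"]), 3, "vault status")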
def authenticate(address, token):
process = ["vault", "auth", "-address", address, token]
subprocess_retry(lambda: subprocess.check_call(process), 5, ' '.join(process))
logging.info("Successfully authenticated against the vault server")
def read_secret(address, name):
process = ["vault", "read",
"-address", address,
"-format", "json",
"secret/{}".format(name)]
secret_data = subprocess_retry(lambda: json.loads(subprocess.check_output(process)
), 5, ' '.join(process))
logging.info("Retrieved secret {name} with request_id {rid}".format(
name=name,
rid=secret_data["request_id"])
)
warning = secret_data.get("warnings", None)
if warning is not None:
logging.warning("Warning: {}".format(warning))
return secret_data["data"]
def main():
parser = argparse.ArgumentParser(
description="Download secrets for Spinnaker stored by Halyard"
)
parser.add_argument("--token",
type=str,
help="Vault token for authentication.",
required=True
)
parser.add_argument("--address",
type=str,
help="Vault server's address.",
required=True
)
parser.add_argument("--secret",
type=str,
help="The secret name this instance config can be found in.",
required=True
)
args = parser.parse_args()
authenticate(args.address, args.token)
config_mount = read_secret(args.address, args.secret)
spinnaker_user = pwd.getpwnam("spinnaker").pw_uid
spinnaker_group = grp.getgrnam("spinnaker").gr_gid
for config in config_mount["configs"]:
secret_id = "{name}".format(name=config)
mount = read_secret(args.address, secret_id)
file_name = mount["file"]
contents = base64.b64decode(mount["contents"])
dir_name = os.path.dirname(file_name)
if not os.path.isdir(dir_name):
mkdirs(dir_name)
os.chown(dir_name, spinnaker_user, spinnaker_group)
with open(file_name, "wb") as f:
logging.info("Writing config to {}".format(file_name))
f.write(contents)
os.chown(file_name, spinnaker_user, spinnaker_group)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
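# Example invocation (hypothetical values):
#   python mount-config.py --token s.example-token --address https://vault.example.com:8200 --secret spinnaker-config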
``` |
{
"source": "jmalvinez/tensor2tensor",
"score": 3
} |
#### File: trax/layers/base.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import traceback
import jax
import numpy as onp
from tensor2tensor.trax import backend
from tensor2tensor.trax.backend import nested_map
from tensor2tensor.trax.backend import ShapeType
class Layer(object):
"""Base class for composable layers in a deep learning network.
Layers are the basic building blocks for deep learning models. A Trax layer
computes a function from zero or more inputs to zero or more outputs,
optionally using trainable parameters (common) and non-parameter state (not
common). Authors of new layer subclasses typically override at most two
methods of the base `Layer` class:
forward(inputs, params=(), state=(), **kwargs):
Computes this layer's output as part of a forward pass through the model.
new_params_and_state(self, input_shape, input_dtype, rng):
Returns a (params, state) pair suitable for initializing this layer.
A small subset of layer types are combinators -- they organize the computation
of their sublayers, e.g., applying their sublayers in series or in parallel.
All layers have the following properties, with default values implemented
in the base `Layer` class:
- n_inputs: int (default 1)
- n_outputs: int (default 1)
- params: tuple (default empty -- the layer has no parameters)
- state: tuple (default empty -- the layer has no non-parameter state)
- sublayers: tuple (default empty -- the layer has no sublayers)
The inputs to a layer are tensors, packaged according to how many there are:
- n_inputs = 0: an empty tuple ()
- n_inputs = 1: one tensor (NOT wrapped in a tuple)
- n_inputs > 1: a tuple of tensors
(The special treatment of the single-input case is meant to simplify the
work of layer writers; this design choice may be revisited in the future.)
The outputs from a layer are also tensors, packaged the same as layer inputs:
- n_outputs = 0: an empty tuple ()
- n_outputs = 1: the tensor (NOT wrapped in a tuple)
- n_outputs > 1: a tuple of tensors
The Trax runtime maintains a data stack with which layer calls are composed.
For more complex data network architectures, possibly involving multiple data
flows, one can view each layer as a function from stack state to stack state,
where the function's inputs are a slice from the stack, and the function's
outputs are spliced back into the stack.
"""
def __init__(self, n_inputs=1, n_outputs=1):
"""Creates a partially initialized, unconnected layer instance.
Args:
n_inputs: Number of inputs expected by this layer.
n_outputs: Number of outputs promised by this layer.
"""
self._n_inputs = n_inputs
self._n_outputs = n_outputs
self._sublayers = () # Default is no sublayers.
self._params = () # cached parameters
self._state = ()
self._caller = _find_frame(inspect.stack()) # for custom error messages
self._init_finished = False
def __repr__(self):
class_str = self.__class__.__name__
fields_str = 'in={},out={}'.format(self.n_inputs, self.n_outputs)
objs = self.sublayers
if objs:
objs_str = ', '.join(str(x) for x in objs)
return '{}{{{},sublayers=[{}]}}'.format(class_str, fields_str, objs_str)
else:
return '{}{{{}}}'.format(class_str, fields_str)
def forward(self, inputs, params=(), state=(), **kwargs):
"""Computes this layer's output as part of a forward pass through the model.
Authors of new Layer subclasses should override this method to define the
forward computation that their layer performs.
Args:
inputs: Input tensors, matching the number (n_inputs) expected by this
layer. Specifically:
- n_inputs = 0: an empty tuple ()
- n_inputs = 1: a tensor (NOT wrapped in a tuple)
- n_inputs > 1: a tuple of tensors, with n_inputs items
params: A tuple of trainable parameters, with one element for this layer
if this layer has no sublayers, or one for each sublayer if this
layer has sublayers. If a layer (or sublayer) has no trainable
parameters, the corresponding params element is an empty tuple.
state: Layer-specific non-parameter state that can update between batches.
**kwargs: Often empty; main current use is to carry a PRNG key for random
number generation, using the keyword 'rng'.
Returns:
Tensors, matching the number (n_outputs) promised by this layer.
Specifically:
- n_outputs = 0: an empty tuple
- n_outputs = 1: one tensor (NOT wrapped in a tuple)
- n_outputs > 1: a tuple of tensors, with n_outputs items
"""
raise NotImplementedError
def new_params_and_state(self, input_shape, input_dtype, rng):
"""Returns a (params, state) pair suitable for initializing this layer.
Authors of new Layer subclasses should override this method if their layer
uses trainable parameters or has non-parameter state that gets updated
between batches. The default implementation works for layers that have
no parameters or state.
Args:
input_shape: A tuple representing a shape (if this layer takes one input)
or a tuple of shapes (if this layer takes more than one input).
For example: (210, 160, 3) or ((210, 160, 3), (105, 80, 3)).
input_dtype: Numpy dtype(s) for each of the inputs.
rng: A PRNG key for random number generation.
"""
del input_shape, input_dtype, rng
return (), ()
@property
def n_inputs(self):
"""Returns how many tensors this layer expects as input."""
return self._n_inputs
@property
def n_outputs(self):
"""Returns how many tensors this layer promises as output."""
return self._n_outputs
@property
def sublayers(self):
"""Returns a tuple containing this layer's sublayers; may be empty."""
return self._sublayers
@property
def params(self):
"""Returns a tuple containing this layer's parameters; may be empty."""
return self._params
@params.setter
def params(self, params):
self._params = params
@property
def state(self):
"""Returns a tuple containing this layer's state; may be empty."""
return self._state
@state.setter
def state(self, state):
self._state = state
@property
def has_backward(self):
"""Returns True if this layer provides its own (custom) backward pass code.
A layer subclass that provides custom backward pass code (for custom
gradients) must override this method to return True.
"""
return False
def backward(self, inputs, output, grad, params, state, **kwargs):
"""Custom backward pass to propagate gradients in a custom way.
Args:
inputs: Input tensors; can be a (possibly nested) tuple.
output: The result of running this layer on inputs.
grad: Gradient signal (called cotangent in JAX) computed based on
subsequent layers. The structure and shape must match the output.
params: Layer parameters.
state: Start state.
**kwargs: Keyword arguments for the layer.
Returns:
The custom gradient signal for the input. Note that we need to return
a gradient for each argument of forward, so it will usually be a tuple
of signals: the gradient for inputs and parameters.
"""
raise NotImplementedError
# End of subclassing interface, all functions below are internal.
def pseudo_forward(self, pseudo_inputs, params, state):
"""Computes shapes and types this layer would produce for the given inputs.
Args:
pseudo_inputs: A ShapeType instance (input data minus the actual values)
or a tuple of ShapeType instances, following the same conventions as
Layer.forward's input arg.
params: Parameters for this layer.
state: start state.
Returns:
A tuple of (output, state).
The output part of the tuple is a ShapeType instance representing the
shape and type of the output (if this layer has one output) or a tuple
of ShapeType instances (if this layer has more than one output).
"""
try:
# Beware: using an actual RNG (as opposed to this ShapeType stub) would
# cause a large number of dropout masks to be computed and permanently
# stored in global memory.
rng = ShapeType(shape=(2,), dtype=onp.uint32)
def call_on_input(x, params, state, rng):
return self.forward(x, params=params, state=state, rng=rng)
params_shapes = nested_map(
params, lambda x: ShapeType(shape=x.shape, dtype=x.dtype))
s = backend.eval_on_shapes(call_on_input)(pseudo_inputs,
params_shapes, state, rng)
return s
except Exception:
name, trace = self.__class__.__name__, _short_traceback(skip=3)
raise LayerError(name, 'pseudo_forward', self._caller, pseudo_inputs,
None, trace)
def initialize_once(self, input_shapes, input_dtype, rng):
"""Initializes this layer and its sublayers recursively.
This method is designed to initialize each layer instance once, even if the
same layer instance occurs in multiple places in the network. This enables
weight sharing to be implemented as layer sharing.
Args:
input_shapes: A tuple representing a shape (if this layer takes one input)
or a tuple of shapes (if this layer takes more than one input).
For example: (210, 160, 3) or ((210, 160, 3), (105, 80, 3)).
input_dtype: Numpy dtype(s) for each of the inputs.
rng: A PRNG key for random number generation.
Returns:
A (params, state) tuple, in which params contains newly created parameters
on the first call and () on all subsequent calls.
"""
try:
# Initialize params once; store them for use when this layer is called.
# Needs to call new_params_and_state regardless of _init_finished because
# state also needs to be initialized. After jitting, graph pruning should
# be able to remove unnecessary computation.
# TODO(lukaszkaiser): Revisit this decision and see whether layers sharing
# params should also share states.
params, state = self.new_params_and_state(input_shapes, input_dtype, rng)
if not self._init_finished:
self._init_finished = True
self._params = params
self._state = state
else:
params = ()
return (params, state)
except Exception:
name, trace = self.__class__.__name__, _short_traceback(skip=3)
raise LayerError(name, 'initialize_once', self._caller, input_shapes,
input_dtype, trace)
# XXX(kitaev):
_STASH_IN = None
_STASH_OUT = None
def __call__(self, x, **kwargs):
"""Makes Layer instances callable; for use in tests or interactive settings.
This convenience method helps library users play with, test, or otherwise
probe the behavior of layers outside of a full training environment. It
presents the layer as a callable function from inputs to outputs, with the
option of manually specifying parameters and non-parameter state per
individual call. For convenience, parameters and non-parameter state are
cached per layer instance, starting from default values of () and (), and
acquiring non-empty values either by initialization or from values
explicitly provided via the params and state keyword arguments.
Args:
x: 0 or more input tensors, formatted the same as the inputs to
Layer.forward.
**kwargs: Additional keyword arguments if needed/desired for this layer.
Three possible keyword arguments are especially relevant:
- params=... will override any cached params values
- state=... will override any cached state values
- rng=... will supply a PRNG key for use by the layer
Returns:
0 or more output tensors, formatted the same as the outputs from
Layer.forward.
"""
params = kwargs.pop('params', self.params)
state = kwargs.pop('state', self.state)
outputs, _ = self.apply_forward(x, params=params, state=state, **kwargs)
return outputs
def apply_forward(self, x, params=(), state=(), **kwargs):
"""Applies this layer as part of a forward pass; an internal system method.
This method is reserved for handling plumbing and other internal affairs
as needed by the overall library. Trax library users should use or override
the `forward` method instead.
Args:
x: See Layer.forward inputs.
params: See Layer.forward.
state: See Layer.forward.
**kwargs: See Layer.forward.
Returns:
See Layer.forward.
"""
try:
# If params are nothing, we may be reusing this layer.
# Use the cached parameters to calculate the value.
# Note: to make sure jit tracers can decide this branch in python we
# use "params is ()" instead of, e.g., "not params" or "params == ()".
if params is (): # pylint: disable=literal-comparison
params = self._params
else:
# In this case, we're called for the first time: cache parameters.
self._params = params
if not self.has_backward or Layer._STASH_IN is not None:
outputs, s = self.forward(x, params=params, state=state, **kwargs)
else:
outputs, s = self._do_custom_gradients(x, params, state, **kwargs)
self._state = s
return outputs, s
except Exception:
name, trace = self.__class__.__name__, _short_traceback()
raise LayerError(name, 'apply_forward', self._caller,
shapes(x), None, trace)
def _do_custom_gradients(self, x, params, state, **kwargs):
"""Calls this layer for a forward pass, but with custom gradients."""
assert backend.get_name() == 'jax', (
'Custom gradients are only supported in JAX for now.')
# TODO(wangpeng): JAX doesn't support custom grads for functions with
# auxiliary output yet (https://github.com/google/jax/issues/844). Will
# remove the constraints on state below when this feature is added to
# JAX.
assert not jax.tree_util.tree_leaves(state), (
'Custom gradients require trivial start state. Got %s' % str(state))
def check_end_state(output_state):
output, state = output_state
assert not jax.tree_util.tree_leaves(state), (
'Custom gradients require trivial end state. Got %s' % str(state))
return output
# See this link for how custom transformations are defined in JAX:
# https://jax.readthedocs.io/en/latest/jax.html#jax.custom_transforms
# Note that we capture the kwargs and don't calculate gradients wrt. them.
@jax.custom_transforms
def _do_forward(y, params):
return check_end_state(self.forward(y, params=params, state=state,
**kwargs))
# This is the custom gradient (vector-jacobian product in JAX) function.
# For the exact specification of this custom transformation see this link:
# https://jax.readthedocs.io/en/latest/jax.html#jax.defjvp_all
def do_forward_vjp(y, params):
"""Custom gradient (vjp) function."""
stash = None
if Layer._STASH_IN is None:
Layer._STASH_IN = stash = {}
output = check_end_state(self.forward(y, params=params, state=state,
**kwargs))
if stash is not None:
Layer._STASH_IN = None
def vjpfun(grad):
assert Layer._STASH_OUT is None
Layer._STASH_OUT = stash
res = self.backward(y, output, grad, params, state, **kwargs)
Layer._STASH_OUT = None
return res
return output, vjpfun
jax.defvjp_all(_do_forward, do_forward_vjp)
return _do_forward(x, params), state
class LayerError(Exception):
"""Exception raised in the layer stack.
Attributes:
message: the message corresponding to this exception.
"""
def __init__(self, layer_name, function_name, caller,
input_shapes, input_types, traceback_string):
self._layer_name = layer_name
self._function_name = function_name
self._caller = caller # Python inspect object with init caller info.
self._traceback = traceback_string
self._input_shapes = input_shapes
self._input_types = input_types
super(LayerError, self).__init__(self.message)
@property
def message(self):
"""Create error message."""
prefix = 'Exception passing through layer '
prefix += '%s (in %s):\n' % (self._layer_name, self._function_name)
short_path = '[...]/' + '/'.join(self._caller.filename.split('/')[-3:])
caller = ' layer created in file %s, line %d\n' % (short_path,
self._caller.lineno)
shapes_str = ' layer input shapes: %s\n\n' % str(self._input_shapes)
if self._input_types is not None:
types_str = ' layer input types: %s\n' % str(self._input_types)
shapes_str = types_str + shapes_str
return prefix + caller + shapes_str + self._traceback
def _apply_to_first_n(f, x, n):
"""Helper: apply f to first n elements on the stack x if n > 0."""
if n < 1:
return f(x)
argument, rest = x[:n], x[n:]
if n == 1:
argument = argument[0]
result = f(argument)
if not rest:
return result
if n == 1:
result = [result]
result = list(result) + list(rest)
if isinstance(x, tuple):
result = tuple(result)
return result
def nested_reduce(x, f):
"""Fold the function f to the nested structure x (dicts, tuples, lists)."""
if isinstance(x, list):
return f([nested_reduce(y, f) for y in x])
if isinstance(x, tuple):
return f([nested_reduce(y, f) for y in x])
return x
def shapes(x):
"""Get a structure of shapes for a structure of nested arrays."""
def shape(x):
try:
return tuple([int(i) for i in x.shape])
except Exception: # pylint: disable=broad-except
return []
return nested_map(x, shape)
def sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return nested_map(x, size)
def _find_frame(stack, start=0):
"""Find the frame with the caller on the stack."""
# We want to find the first place where the layer was called
# that is *not* an __init__ function of an inheriting layer.
frame = inspect.getframeinfo(stack[start][0])
# If we are in an init, move on.
if frame.function == '__init__':
return _find_frame(stack, start + 1)
return frame
def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:]
def _short_traceback(skip=3):
"""Cleaned-up form of traceback."""
counter, res = 0, []
# Skipping 3 lines by default: the top (useless) and self-call.
lines = traceback.format_exc().splitlines()[skip:]
for l in lines:
res.append(_shorten_file_path(l))
if counter % 2 == 1:
res.append('')
counter += 1
# If we see a LayerError, the traceback has already been processed.
if l.startswith('LayerError'):
# Skip 4 back except last as these are internal base-layer calls.
res = res[:-4] + [res[-1]]
res += lines[counter:]
break
return '\n'.join(res)
def _validate_forward_input(x, n_inputs):
if n_inputs != 1:
if not isinstance(x, tuple):
raise TypeError(
'expected input to be a tuple; instead received {}'.format(type(x)))
if len(x) != n_inputs:
raise ValueError(
'input tuple length ({}) does not equal required number of inputs'
' ({})'.format(len(x), n_inputs))
def layer(n_inputs=1, n_outputs=1, new_params_and_state_fn=None):
"""Returns a decorator that converts a function into a Layer class builder."""
def _build_layer_class(raw_fn):
"""Returns a Layer class whose callable instances execute the function."""
def _init(self, **kwargs):
self._kwargs = kwargs # pylint: disable=protected-access
Layer.__init__(self, n_inputs=n_inputs, n_outputs=n_outputs)
def _new_params_and_state(self, input_shapes, input_dtype, rng):
if new_params_and_state_fn is None:
return (), ()
kwargs = self._kwargs # pylint: disable=protected-access
return new_params_and_state_fn(input_shapes, input_dtype, rng, **kwargs)
def _is_empty(raw_output):
return raw_output is None or (isinstance(raw_output, (list, tuple))
and len(raw_output) == 0) # pylint: disable=g-explicit-length-test
def _forward(self, x, params=(), state=(), **kwargs):
"""Uses this layer as part of a forward pass through the model."""
merged_kwargs = kwargs.copy()
merged_kwargs.update(self._kwargs) # pylint: disable=protected-access
_validate_forward_input(x, n_inputs)
raw_output = raw_fn(x, params=params, **merged_kwargs)
output = () if _is_empty(raw_output) else raw_output
return (output, state)
# Set docstrings and create the class.
_forward.__doc__ = raw_fn.__doc__
_new_params_and_state.__doc__ = new_params_and_state_fn.__doc__
# Note: None.__doc__ is None
cls = type(raw_fn.__name__, (Layer,),
{'__init__': _init,
'forward': _forward,
'new_params_and_state': _new_params_and_state})
return cls
return _build_layer_class
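# Illustrative sketch (not part of the original module): the `layer` decorator turns a
# plain function into a Layer subclass. A hypothetical doubling layer could be written as:
#
#   @layer()
#   def Double(x, params=(), **unused_kwargs):
#     """Returns its single input multiplied by 2."""
#     return x * 2
#
# Double() then behaves like any other single-input, single-output Layer instance.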
def _random_values(input_shapes, rng, integer_inputs=False):
"""Creates random floats or ints of the given shape.
Args:
input_shapes: A tuple representing a shape (if the layer takes one input)
or a tuple of shapes (if this layer takes more than one input).
For example: (210, 160, 3) or ((210, 160, 3), (105, 80, 3)).
rng: A random number generator.
integer_inputs: If True, use numpy int32 to produce the random data, else
use float32.
Returns:
Random values with the shape and type specified.
"""
if isinstance(input_shapes[0], int):
# Non-nested shape, create a random tuple.
if not integer_inputs:
return backend.random.uniform(rng, input_shapes, minval=-1.0, maxval=1.0)
return backend.random.bernoulli(rng, 0.5, input_shapes).astype(onp.int32)
elif isinstance(input_shapes, tuple): # Nested shape: tuple.
return tuple(_random_values(x, rng, integer_inputs) for x in input_shapes)
else:
raise TypeError(type(input_shapes))
def _is_tuple_of_shapes(shape):
# TODO(jonni): Find better way to distinguish a shape from a tuple of shapes.
if not isinstance(shape, tuple):
raise TypeError('shape must be a tuple or tuple of tuples, instead got:'
' {}'.format(shape))
return isinstance(shape, tuple) and isinstance(shape[0], tuple)
def check_shape_agreement(layer_obj, input_shapes, integer_inputs=False):
"""Checks if the layer's call output agrees its pseudo_forward predictions.
This function helps test layer mechanics and inter-layer connections that
aren't dependent on specific data values.
Args:
layer_obj: A Layer instance.
input_shapes: A tuple representing a shape (if the layer takes one input)
or a tuple of shapes (if this layer takes more than one input).
For example: (210, 160, 3) or ((210, 160, 3), (105, 80, 3)).
integer_inputs: If True, use numpy int32 as the type for the pseudo-data,
else use float32.
Returns:
A tuple representing either a single shape (if the layer has one output) or
a tuple of shape tuples (if the layer has more than one output).
"""
rng1, rng2, rng3 = backend.random.split(backend.random.get_prng(0), 3)
input_dtype = onp.int32 if integer_inputs else onp.float32
if _is_tuple_of_shapes(input_shapes):
pseudo_data = tuple(ShapeType(x, input_dtype) for x in input_shapes)
input_dtype = tuple(input_dtype for _ in input_shapes)
else:
pseudo_data = ShapeType(input_shapes, input_dtype)
params, state = layer_obj.initialize_once(input_shapes, input_dtype, rng1)
pseudo_output, _ = layer_obj.pseudo_forward(pseudo_data, params, state)
if isinstance(pseudo_output, tuple):
output_shape = tuple(x.shape for x in pseudo_output)
else:
output_shape = pseudo_output.shape
random_input = _random_values(input_shapes, rng2, integer_inputs)
real_output = layer_obj(random_input, params=params, state=state, rng=rng3)
result_shape = shapes(real_output)
msg = 'output shape %s != real result shape %s' % (output_shape, result_shape)
assert output_shape == result_shape, msg
# TODO(jonni): Remove this assert? It makes test logs harder to read.
return output_shape
```
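A minimal sketch of how the shape-checking helper above might be exercised. The import path, the toy `Identity` layer, and the shapes are illustrative assumptions, not part of the original code:

```python
# Hypothetical usage of check_shape_agreement with a toy layer defined via the decorator.
from tensor2tensor.trax.layers import base

@base.layer()
def Identity(x, params=(), **unused_kwargs):
  """Passes its single input through unchanged."""
  return x

out_shape = base.check_shape_agreement(Identity(), (4, 7))
print(out_shape)  # expected: (4, 7)
```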
#### File: trax/rl/online_tune.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def historical_metric_values(history, metric):
"""Converts a metric stream from a trax History object into a numpy array."""
metric_sequence = history.get(*metric)
metric_values = np.array([
metric_value for (_, metric_value) in metric_sequence
])
if np.any(np.isnan(metric_values)):
# Zero out all observations if any element is NaN. This way the agent
# doesn't get any rewards, so it learns to avoid those regions.
metric_values[:] = 0.0
return metric_values
def control_to_observation(control_values, control_config, observation_range):
"""Flips, logarithms, clips and scales the control to observation_range."""
(_, _, (low, high), flip) = control_config
def transform(x):
return np.log(maybe_flip(x, flip))
(log_control_values, log_low, log_high) = map(
transform, (control_values, low, high)
)
if flip:
(log_low, log_high) = (log_high, log_low)
log_control_values = np.clip(log_control_values, log_low, log_high)
# Rescale the log control values to the observation range.
(obs_low, obs_high) = observation_range
return (
(log_control_values - log_low) / (log_high - log_low) *
(obs_high - obs_low) + obs_low
)
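# Worked sketch (hypothetical numbers): with control_config = ("momentum", None, (0.5, 0.99), True)
# and observation_range = (0.0, 1.0), a control value of 0.9 is flipped to 0.1, log-transformed,
# clipped into [log(0.01), log(0.5)], and rescaled to roughly 0.59 inside the observation range.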
def control_metric(name):
"""Returns the (mode, metric) pair in History for the given control."""
return ("train", "training/{}".format(name))
def maybe_flip(value, flip):
"""Flips a control (or not).
Meant to translate controls that naturally take values close to 1
(e.g. momentum) to a space where multiplication makes sense (i.e. close to 0).
Args:
value: float or numpy array, value of the control.
flip: bool, whether to flip or not.
Returns:
Either value or 1 - value based on flip.
"""
if flip:
value = 1 - value
return value
def history_to_observations(
history, metrics, observation_range, control_configs=None):
"""Converts a trax History object into a sequence of observations."""
(obs_low, obs_high) = observation_range
observation_dimensions = [
np.clip(historical_metric_values(history, metric), obs_low, obs_high)
for metric in metrics
]
if control_configs is not None:
for control_config in control_configs:
(control_name, _, _, _) = control_config
observation_dimensions.append(control_to_observation(
historical_metric_values(history, control_metric(control_name)),
control_config,
observation_range,
))
return np.stack(observation_dimensions, axis=1)
def update_control(control_config, action, history, action_multipliers):
"""Calculates a new value of a control based on an action."""
(name, _, (low, high), flip) = control_config
metric = control_metric(name)
control_values = historical_metric_values(history, metric)
assert control_values.shape[0] > 0, (
"No last control {} found in history.".format(name))
current_control = control_values[-1]
(current_control, low, high) = maybe_flip(
np.array([current_control, low, high]), flip
)
if flip:
(low, high) = (high, low)
new_control = np.clip(
current_control * action_multipliers[action], low, high
)
return maybe_flip(new_control, flip)
``` |
{
"source": "jmalzac/snips-jus",
"score": 2
} |
#### File: snips-jus/jushack/jushack.py
```python
import os
from os.path import expanduser
import subprocess
import serial
import serial.tools.list_ports
import sys
import time
import warnings
MAX_JUS = 1
MIN_JUS = 1
class JusHack:
extra_size_dict = {
u'petit': 1,
u'grand': 2,
}
jus_size_dict = {
u'petit': 1,
u'grand': 2,
}
"""
ok
"""
@staticmethod
def compute_value(jus_size):
print("preparing: %s" % jus_size)
# Fall back to the smallest size when the requested size is unknown.
size = JusHack.jus_size_dict.get(jus_size,
JusHack.jus_size_dict[u'petit'])
return size
def __init__(self, locale = "EN_US", extra = False):
arduino_ports = [
p.device
for p in serial.tools.list_ports.comports()
for x in range (0, 10)
if 'ttyUSB%d' % x in p.name or "ttyACM%d" % x in p.name]
if not arduino_ports:
raise IOError("No Arduino found")
if len(arduino_ports) > 1:
warnings.warn('Multiple Arduinos found - using the first')
self.ser = serial.Serial(
port=arduino_ports[0],
baudrate = 9600
)
if (extra):
JusHack.jus_size_dict = JusHack.extra_size_dict
def jus(self, jus_type, jus_size, number=1):
# Signature matches the example calls below: (type, size, count).
number = max(number, MIN_JUS)
number = min(number, MAX_JUS)
print(jus_size)
value = JusHack.compute_value(jus_size)
print(value)
self.ser.write('B%dE\n' % (value))
if (__name__ == "__main__"):
c = JusHack();
c.jus("normal", "petit",1)
c.jus("normal", "grand",2)
``` |
{
"source": "jmamath/learning-approximate-invariance-requires-far-fewer-data",
"score": 2
} |
#### File: jmamath/learning-approximate-invariance-requires-far-fewer-data/classes.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import os
import time
import torch
from torch.distributions import Normal
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import trange
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import seaborn as sns
import imgaug
#from keras.datasets import mnist
from imgaug import augmenters as iaa
#from keras.utils import np_utils
torch.set_default_tensor_type('torch.cuda.FloatTensor')
#@title Early Stopping class
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss, model, path):
score = -val_loss
# 1st iteration
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
elif score < self.best_score + self.delta:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, path)
self.counter = 0
def save_checkpoint(self, val_loss, model, path):
'''Saves the model when the validation loss decreases.'''
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
# if not os.path.exists(path):
# os.makedirs(path)
# torch.save(model.state_dict(), path+'/checkpoint.pt')
self.val_loss_min = val_loss
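# Illustrative sketch (not part of the original code): typical use of EarlyStopping
# inside a training loop, where `validate` and `checkpoint_dir` are hypothetical.
#   early_stopping = EarlyStopping(patience=5, verbose=True)
#   for epoch in range(num_epochs):
#       val_loss = validate(model)
#       early_stopping(val_loss, model, checkpoint_dir)
#       if early_stopping.early_stop:
#           break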
# @title Dataset class
class MyData(Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, data, labels, return_perturb=False, sample_size=None, augmentation=None, training=False):
'Initialization'
self.labels = labels
self.data = data
self.return_perturb = return_perturb
self.augmentation = augmentation
self.sample_size = sample_size
self.training = training
def __len__(self):
'Denotes the total number of samples'
return len(self.data)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
X = self.data[index]
h, w = X.shape
# Load data and get label
y = self.labels[index]
if self.return_perturb==False:
X = X.reshape(-1)
return X,y
elif self.sample_size > 1:
X = X.cpu()
y = y.cpu()
X_repeated = np.tile(X, [self.sample_size, 1, 1]) # Because we want X.shape = (sample_size, 28,28)
y_repeated = np.tile(y, [self.sample_size, 1]) # Because we want y.shape = (sample_size, 10)
X_aug = self.augmentation(images=X_repeated)
if self.training:
# import pdb; pdb.set_trace()
X_repeated = X_repeated.reshape(self.sample_size,-1)
X_aug = X_aug.reshape(self.sample_size,-1)
return X_repeated, X_aug, y_repeated
else:
X_aug = self.augmentation(images=X)
X_aug = X_aug.reshape(-1)
X = X.reshape(-1)
return X, X_aug, y
# @title Gaussian Layer class
class GaussianLayer(nn.Module):
def __init__(self, shape, standard=False):
super(GaussianLayer, self).__init__()
self.shape = shape
if standard is True:
self.mu = nn.Parameter(torch.zeros(shape))
self.log_var = nn.Parameter(torch.zeros(shape))
else:
self.mu = nn.Parameter(torch.rand(shape))
self.log_var = nn.Parameter(torch.rand(shape))
def forward(self, num_samples=1):
if not isinstance(num_samples, tuple):
num_samples = (num_samples,)
eps_shape = num_samples + self.shape
eps = torch.randn(eps_shape) # ~ N(0,I)
return self.mu + torch.exp(self.log_var) * eps
def entropy(self):
distribution = Normal(loc=self.mu, scale=self.log_var.exp())
return distribution.entropy().mean()
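# Usage sketch (hypothetical shapes): sampling weights from the layer.
#   g = GaussianLayer((3, 5))
#   w = g()    # one sample, shape (1, 3, 5)
#   w5 = g(5)  # five samples, shape (5, 3, 5)
# Note that torch.exp(self.log_var) multiplies the noise directly, so log_var is
# effectively treated as the log of the standard deviation in this implementation.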
# @title Invariant Prior class
############### 2. CREATE THE MODEL ###############
class ApproximateInvariance(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, sample_size, prior=GaussianLayer):
super(ApproximateInvariance, self).__init__()
self.prior = prior
self.sample_size = sample_size
self.input_dim = input_dim
self.output_dim = output_dim
self.weight_1 = prior((hidden_dim, input_dim), standard=True)
self.bias_1 = prior((hidden_dim,), standard=True)
self.weight_2 = prior((output_dim, hidden_dim), standard=True)
self.bias_2 = prior((output_dim,), standard=True)
def batch_forward(self, x, x_aug):
# We remove the num_sample dimension if it is equal to one.
w1 = self.weight_1().squeeze(0)
b1 = self.bias_1()
w2 = self.weight_2().squeeze(0)
b2 = self.bias_2()
x = F.linear(x, w1, b1)
x = F.relu(x)
x = F.linear(x, w2, b2)
x = F.softmax(x, dim=-1) + 1e-8
x_aug = F.linear(x_aug, w1, b1)
x_aug = F.relu(x_aug)
x_aug = F.linear(x_aug, w2, b2)
x_aug = F.softmax(x_aug, dim=-1) + 1e-8
return x, x_aug
def forward(self, x, x_aug):
"""
We need to compute the output of the neural network for the input x
and the augmented input x_aug with the same weights. And we need
to sample a new set of weights for each augmentation, hence the loop
Input:
x: torch Tensor. shape = (batch_size, num_sample, input_dim)
x_aug: Same shape as x, but each num_sample slice holds a different augmentation,
while x is simply repeated along that dimension to leverage broadcasting.
"""
if self.sample_size > 1:
batch_size, num_samples, _ = x.shape
results = torch.zeros(batch_size, num_samples, self.output_dim)
results_aug = torch.zeros_like(results)
for i in range(num_samples):
results[:,i], results_aug[:,i] = self.batch_forward(x[:,i], x_aug[:,i])
else:
results, results_aug = self.batch_forward(x, x_aug)
return results, results_aug
def entropy(self):
"""
Each weight computes its own entropy
"""
entropy_w1 = self.weight_1.entropy()
entropy_b1 = self.bias_1.entropy()
entropy_w2 = self.weight_2.entropy()
entropy_b2 = self.bias_2.entropy()
return entropy_w1 + entropy_b1 + entropy_w2 + entropy_b2
def kl_div_output(pred1, pred2, sample_size):
"""
This function computes the KL divergence between the output of
the standard neural network and the neural network with augmented data
Input:
pred1. Float tensor. K-class softmax prediction of network 1
pred2. Float tensor. K-class softmax prediction of network 2
Output:
kl_div. Float. The KL divergence between the two
"""
if sample_size > 1:
batch_size, num_sample, output_dim = pred1.shape
log_ratio = torch.log(pred1/pred2)
kl_div = torch.mean(pred1 * log_ratio, axis=[0,1]) # Average over num_sample and batches
return kl_div.sum()
else:
log_ratio = torch.log(pred1/pred2)
kl_div = torch.mean(pred1 * log_ratio, axis=0) # Average over batches
return kl_div.sum()
# @title Bayes by Backprogagation class
class BayesbyBackprop(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, prior):
super(BayesbyBackprop, self).__init__()
self.prior = prior
self.weight_1 = GaussianLayer((hidden_dim, input_dim))
self.bias_1 = GaussianLayer((hidden_dim,))
self.weight_2 = GaussianLayer((output_dim, hidden_dim))
self.bias_2 = GaussianLayer((output_dim,))
def forward(self, x):
# We remove the num_sample dimension if it is equal to one.
w1 = self.weight_1().squeeze(0)
b1 = self.bias_1()
w2 = self.weight_2().squeeze(0)
b2 = self.bias_2()
# import pdb; pdb.set_trace()
x = F.linear(x, w1, b1)
x = F.selu(x)
x = F.linear(x, w2, b2)
x = F.selu(x)
return x
def sample(self, num_samples=5):
w1_samples = self.weight_1(num_samples=num_samples).view((num_samples, -1))
b1_samples = self.bias_1(num_samples=num_samples).view((num_samples, -1))
w2_samples = self.weight_2(num_samples=num_samples).view((num_samples, -1))
b2_samples = self.bias_2(num_samples=num_samples).view((num_samples, -1))
gen_weights = torch.cat([w1_samples, b1_samples, w2_samples, b2_samples], 1)
return gen_weights
def __kl(self, mu_1, log_var_1, mu_2, log_var_2):
"""
KL divergence between two univariate Gaussians
"""
var_1 = log_var_1.exp()
var_2 = log_var_2.exp()
kl = torch.mean(log_var_2-log_var_1 + (var_1.pow(2)-var_2.pow(2) + (mu_1-mu_2).pow(2))/(2 * var_2.pow(2)))
return kl
def KL_loss(self):
kl_w1 = self.__kl(self.weight_1.mu, self.weight_1.log_var, self.prior.weight_1.mu, self.prior.weight_1.log_var)
kl_b1 = self.__kl(self.bias_1.mu, self.bias_1.log_var, self.prior.bias_1.mu, self.prior.bias_1.log_var)
kl_w2 = self.__kl(self.weight_2.mu, self.weight_2.log_var, self.prior.weight_2.mu, self.prior.weight_2.log_var)
kl_b2 = self.__kl(self.bias_2.mu, self.bias_2.log_var, self.prior.bias_2.mu, self.prior.bias_2.log_var)
return (kl_w1 + kl_w2 + kl_b1 + kl_b2)/4
``` |
{
"source": "jmamath/OOD-Generalization",
"score": 3
} |
#### File: jmamath/OOD-Generalization/uncertainty.py
```python
from utils_two_moons import predict_model, MyData
import numpy as np
from torch.utils.data import DataLoader
def compute_entropy(p):
'''
Computes the entropy of a categorical probability distribution
It handles the case where one of the classes has probability 1. and all the others have 0.
It sets the convention: 0 * log(0) = 0.
The function can handle arrays with arbitrary leading (batch) dimensions.
Args:
p: Float array of any dimension (:, ... ,:, d). The last dimension in each case must be a probability distribution,
i.e. it must sum to 1.
Return
entropy: Float. The entropy of the probability distribution p.
'''
zero_probs = p == 0
log_p = np.log(p)
log_p[zero_probs] = 0
entropy = -(p * log_p).sum(-1)
return entropy
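# Worked sketch: compute_entropy(np.array([0.5, 0.5])) returns -2 * 0.5 * log(0.5) ~= 0.693,
# and for a one-hot distribution np.array([1.0, 0.0]) the 0 * log(0) term is zeroed out,
# giving an entropy of exactly 0.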
def compute_predictive_uncertainty(model, loader, MC_sample, no_classes):
"""
Computes the predictive entropy H(y|x,D)
"""
probs = predict_model(model, loader, MC_sample, no_classes)
batch_size, _ = probs.shape
predictive_uncertainty = compute_entropy(probs)
assert predictive_uncertainty.shape == (batch_size,)
return predictive_uncertainty
def retain_data(data, retain, mode):
"""
Sort the data, and return a percentage in ascending or descending order.
This function is intended to be used with entropies or probabilities. In which
case we would retain data points with lowest entropies, or highest probabilities.
Args:
data: np array of unordered value
retain: Float number between 0 and 1, refer to the percentage
of certain data to keep.
Return:
certain_id: indices of the most certain retained data points
"""
if not isinstance(data, (np.ndarray,)):
data = np.array(data)
# Sort data
sorted_id = np.argsort(data)
batch_size = data.shape[0]
truncate = int(batch_size * retain)
if mode=="ascending":
certain_id = sorted_id[:truncate]
elif mode=="descending":
certain_id = sorted_id[::-1][:truncate]
else:
raise Exception("mode should be ascending or descending")
return certain_id
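# Example: retain_data(np.array([0.3, 0.1, 0.9]), retain=0.67, mode="ascending")
# keeps int(3 * 0.67) = 2 points and returns array([1, 0]), the indices of the two smallest values.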
def sample_lowest_entropy(retain, model, data, labels, MC_sample, no_classes):
'''
Sample a percentage of data with the highest certainty
Args:
retain: Float number between 0 and 1, refer to the percentage
model: predictive function
data: Pool of data.
labels: Pool of labels
Return
new_loader: a DataLoader over the retained data points with the lowest entropy and their labels
'''
# We need the loader only to use predictions
loader = DataLoader(MyData(data, labels), batch_size=512, shuffle=False)
predictions = predict_model(model, loader, MC_sample, no_classes)
# import pdb;pdb.set_trace()
entropies = compute_entropy(predictions).squeeze()
low_entropies_id = retain_data(entropies, retain, "ascending")
# import pdb; pdb.set_trace()
low_entropy_data = data[low_entropies_id]
low_entropy_labels = labels[low_entropies_id]
new_loader = DataLoader(MyData(low_entropy_data, low_entropy_labels), batch_size=512, shuffle=False)
return new_loader
def sample_highest_density(retain, kde, pca, data, labels):
"""
Sample a percentage of data with the highest probability score
Args:
retain: Float number between 0 and 1, refer to the percentage
kde, pca: density functions
data: Pool of data.
labels: Pool of labels
Return
new_loader: a DataLoader over the retained data points with the highest probability scores and their labels
"""
log_probabilities = kde.score_samples(pca.transform(data))
probabilities = np.power(np.exp(1), log_probabilities)
high_probabilities_id = retain_data(probabilities, retain, "descending")
high_probabilities_data = data[high_probabilities_id]
high_probabilities_labels = labels[high_probabilities_id]
new_loader = DataLoader(MyData(high_probabilities_data, high_probabilities_labels), batch_size=512, shuffle=False)
return new_loader
def sample_lowest_entropy_highest_density(retain, model, kde, pca, data, labels, MC_sample, no_classes):
'''
Sample a percentage of data with the highest density and the lowest entropy.
Since we want to sample in different modes (ascending for entropy and
descending for density), we combine them into a single score p(x) / (H(y|x) + eps)
so that we can just take the highest values.
Args:
retain: Float number between 0 and 1, refer to the percentage
model: predictive function
data: Pool of data.
labels: Pool of labels
Return
new_loader: a DataLoader over the retained data points with the lowest entropy and highest density, and their labels
'''
# We need the loader only to use predictions
loader = DataLoader(MyData(data, labels), batch_size=512, shuffle=False)
predictions = predict_model(model, loader, MC_sample, no_classes)
# import pdb;pdb.set_trace()
entropies = compute_entropy(predictions).squeeze()
log_probabilities = kde.score_samples(pca.transform(data))
probabilities = np.power(np.exp(1), log_probabilities)
entropy_density = (1 / (entropies + 1e-6)) * probabilities  # 1e-6 guards against zero entropies
low_entropies_high_density_id = retain_data(entropy_density, retain, "descending")
# import pdb; pdb.set_trace()
low_entropy_high_density_data = data[low_entropies_high_density_id]
low_entropy_high_density_labels = labels[low_entropies_high_density_id]
new_loader = DataLoader(MyData(low_entropy_high_density_data, low_entropy_high_density_labels), batch_size=512, shuffle=False)
return new_loader
``` |
{
"source": "jman005/ArchiveIt",
"score": 3
} |
#### File: ArchiveIt/archiveit/bot.py
```python
from archiveit import libformatter, config
from archiveit.hosts import HostException
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from praw import Reddit
from praw.exceptions import APIException, ClientException
from prawcore.exceptions import Forbidden, NotFound
from praw.models import Message
import requests
import time
import logging
logger = logging.getLogger("archiveit.bot")
# constants
PROCESSES = 2
bottomtext = ("\n\n---\n\n^^[About]"
"(https://www.reddit.com/r/archiveit/"
"comments/9ltg4x/what_is_archiveit_and"
"_faq/) | by /u/jman005"
)
host = config.host()
# #
def crypto_sign(msg, key, psw):
"""Signs utf-8 string with provided private key,
returns bytes"""
with open(key, "rb") as key1:
pemkey = serialization.load_pem_private_key(key1.read(), password=psw, backend=default_backend())
return pemkey.sign(
bytes(msg, 'utf-8'),
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
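# Hypothetical usage (the key path is illustrative): sign the archived text with a PEM private key.
#   signature = crypto_sign("archived thread text", "config/private.pem", None)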
def make_reddit():
return Reddit(user_agent=config.useragent,
username=config.username,
password=config.password,
client_id=config.clientid,
client_secret=config.clientsecret
)
def bot_formatter(post_id, reddit, formatter):
"""Formats a Reddit post.
Arguments should be packed
into a tuple, in the form
(submission id, reddit instance
to use, formatter type)."""
if formatter is not None:
formatter_active = formatter(reddit.submission(id=post_id))
reply = formatter_active.out()
else:
return None
return {'text': reply, 'filetype': formatter_active.filetype}
def task_worker(task, reddit):
logger.info("Archive request received from /u/%s - ID %s" % (task.author.name, task.id))
try:
if type(task) is Message:
formatter = libformatter.get_format(task.body.split(" ")[1])
submission = reddit.submission(url=task.body.split(" ")[0])
else:
formatter = libformatter.get_format(task.body.split("/u/%s" % config.username)[-1])
submission = task.submission
body = bot_formatter(submission.id, reddit, formatter)
task.mark_read()
except (ClientException, NotFound):
task.reply("**Error**: Your query is invalid or the post you have provided is no longer available.")
task.mark_read()
return False
except IndexError:
task.reply("**Error**: No filetype was provided.")
task.mark_read()
return False
if body['text'] is None:
reply = "**Error**: Invalid filetype given for archival."
else:
url_file = host.upload(bytes(body['text'], 'utf-8'), name=task.id + body['filetype'])
reply = "[Archived Thread](%s)" % url_file
if config.privatekey is not None:
signed = bytes(str(crypto_sign(body['text'], config.privatekey, None)), 'utf-8')
url_signed = host.upload(signed, name=str(task.id) + '.signed')
reply += " | [Signed](%s)" % url_signed
reply += bottomtext
if type(task) is Message:
task.reply(reply)
else:
reddit.comment(id=task.id).reply(reply)
return True
def run():
logger.info("Bot started")
reddit = make_reddit()
while True:
try:
for message in reddit.inbox.unread(limit=None):
if message.new:
task_worker(message, reddit)
except APIException:
logging.error("Ratelimit hit or Reddit API experiencing outage. Sleeping for 10 minutes")
time.sleep(600)
logging.info("Bot restarted")
except Forbidden:
logging.warning("Comment invalid, could not reply")
except HostException:
logging.error("Host error, sleeping for 10 minutes")
time.sleep(600)
logging.info("Bot restarted")
```
#### File: ArchiveIt/archiveit/hosts.py
```python
import requests
from requests.exceptions import RequestException
import ftplib
from io import BytesIO
import time
import logging
logger = logging.getLogger("archiveit.host")
class HostException(Exception):
pass
class Host():
def __init__(self):
pass
def upload(self, file: bytes, name=None):
'''
Uploads data to the hosting provider, returns a URL.
:param file: The data to send, as bytes.
:param name: The name of the file (optional)
:return: A string, the URL.
'''
raise NotImplementedError()
class ZeroXZero(Host):
def __init__(self):
super(ZeroXZero, self).__init__()
def upload(self, file, name=None):
data = {'file': file}
try:
r = requests.post("https://0x0.st", files=data)
except RequestException:
logger.error("Could not connect to host 0x0")
raise HostException("Could not connect to host")
if r.status_code != 200:
logger.error("Host 0x0 refused to upload data")
raise HostException("Host refused request")
return r.text
class Local_Storage(Host):
def __init__(self):
super(Local_Storage, self).__init__()
def upload(self, file, name=None):
try:
with open("config/local.txt") as f:
data = f.read()
dirc = data.split("\n")[0]
except FileNotFoundError:
logger.critical("The 'local file' host requires a configuration file 'local.txt'."
"See readme for more information.")
raise FileNotFoundError("Host config file not found")
if name is None:
name = str(int(time.time()))
with open(dirc + "/" + name, "wb") as f:
f.write(file)
return dirc + "/" + name
class FTP(Host):
def __init__(self):
super(FTP, self).__init__()
def upload(self, file, name=None):
try:
with open("config/ftp.txt") as f:
data = f.read()
server = data.split("\n")[0]
username = data.split("\n")[1]
password = data.split("\n")[2]
path = data.split("\n")[3]
except FileNotFoundError:
logger.critical("The 'FTP' host requires a configuration file 'ftp.txt'."
"See readme for more information.")
raise FileNotFoundError("Host config file not found")
session = ftplib.FTP(server)
session.login(username, password)
# https://stackoverflow.com/a/32481928
try:
session.mkd(path)
except ftplib.error_perm as e:
if not e.args[0].startswith('550'):
logger.error("Host FTP refused to upload data")
raise HostException("Could not create directory in FTP")
session.cwd(path)
data = BytesIO(file)
if not name:
name = str(int(time.time()))
session.storbinary("STOR %s" % name, data)
session.close()
return server + "/" + path + "/" + name
hosts = {"0x0": ZeroXZero, "local": Local_Storage, "ftp": FTP}
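# Illustrative sketch (hypothetical data): pick a host backend by name and upload bytes.
#   host = hosts["0x0"]()
#   url = host.upload(b"archived thread contents", name="example.txt")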
``` |
{
"source": "Jman420/fractimation",
"score": 2
} |
#### File: fractimation/data_models/complex_polynomial_iteration_data.py
```python
class ComplexPolynomialIterationData(object):
iteration_values = None
exploded_indexes = None
remaining_indexes = None
def __init__(self, iteration_values, exploded_indexes, remaining_indexes):
self.iteration_values = iteration_values
self.exploded_indexes = exploded_indexes
self.remaining_indexes = remaining_indexes
def get_iteration_values(self):
return self.iteration_values
def get_exploded_indexes(self):
return self.exploded_indexes
def get_remaining_indexes(self):
return self.remaining_indexes
```
#### File: fractimation/data_models/complex_range.py
```python
class ComplexRange(object):
"""description of class"""
real_number_values = None
imaginary_number_values = None
def __init__(self, real_number_values, imaginary_number_values):
self.real_number_values = real_number_values
self.imaginary_number_values = imaginary_number_values
def get_real_number_values(self):
return self.real_number_values
def get_imaginary_number_values(self):
return self.imaginary_number_values
```
#### File: fractimation/functionality/zoomable_complex_range.py
```python
import numpy
from ..data_models.complex_range_params import ComplexRangeParams
from ..data_models.dimension_params import DimensionParams
def _reinitialize_renderer(renderer, fractal_iterable, z_values_range_params,
c_values_range_params):
dimension_params = fractal_iterable.get_dimension_params()
dimension_params.initialize()
fractal_iterable.initialize(z_values_range_params, c_values_range_params, dimension_params,
fractal_iterable.get_formula_params(),
fractal_iterable.get_max_iterations())
renderer.initialize(fractal_iterable)
class ZoomableComplexRange():
"""Base Class for Zoomable Complex Polynomial Fractal Equation Renderers"""
_renderer = None
_zoom_cache = None
def __init__(self, renderer):
self._zoom_cache = []
self._renderer = renderer
def zoom_in(self, top_left_x, top_left_y, bottom_right_x, bottom_right_y):
fractal_iterable = self._renderer.get_fractal_iterable()
prev_zoom = ZoomCacheItem(fractal_iterable.get_z_values_range_params(),
fractal_iterable.get_c_values_range_params())
z_values_range = fractal_iterable.get_z_values_range()
z_min_real_num = z_values_range.real_number_values[top_left_x][top_left_y]
z_max_real_num = z_values_range.real_number_values[bottom_right_x][bottom_right_y]
z_min_imaginary_num = z_values_range.imaginary_number_values[top_left_x][top_left_y]
z_max_imaginary_num = z_values_range.imaginary_number_values[bottom_right_x][bottom_right_y]
c_values_range = fractal_iterable.get_c_values_range()
c_min_real_num = c_values_range.real_number_values[top_left_x][top_left_y]
c_max_real_num = c_values_range.real_number_values[bottom_right_x][bottom_right_y]
c_min_imaginary_num = c_values_range.imaginary_number_values[top_left_x][top_left_y]
c_max_imaginary_num = c_values_range.imaginary_number_values[bottom_right_x][bottom_right_y]
z_values_range_params = fractal_iterable.get_z_values_range_params()
new_z_values_range_params = ComplexRangeParams(z_min_real_num, z_max_real_num,
z_min_imaginary_num, z_max_imaginary_num,
z_values_range_params.spacing_func)
c_values_range_params = fractal_iterable.get_c_values_range_params()
new_c_values_range_params = ComplexRangeParams(c_min_real_num, c_max_real_num,
c_min_imaginary_num, c_max_imaginary_num,
c_values_range_params.spacing_func)
_reinitialize_renderer(self._renderer, fractal_iterable, new_z_values_range_params,
new_c_values_range_params)
self._zoom_cache.append(prev_zoom)
def zoom_out(self):
if len(self._zoom_cache) < 1:
return False
prev_zoom = self._zoom_cache.pop()
fractal_iterable = self._renderer.get_fractal_iterable()
_reinitialize_renderer(self._renderer, fractal_iterable, prev_zoom.z_values_range_params,
prev_zoom.c_values_range_params)
return True
class ZoomCacheItem(object):
c_values_range_params = None
z_values_range_params = None
def __init__(self, z_values_range_params, c_values_range_params):
self.z_values_range_params = z_values_range_params
self.c_values_range_params = c_values_range_params
```
#### File: fractimation/iterators/complex_polynomial.py
```python
import numpy
from .base.fractal_formula import FractalFormulaIterable, FractalFormulaIterator
from ..data_models.complex_polynomial_iteration_data import ComplexPolynomialIterationData
from ..helpers.fractal_algorithm import evaluate_polynomial_1d
from ..helpers.list_tools import remove_indexes
_FRACTAL_NAME = "Generic Complex Polynomial"
class ComplexPolynomialIterable(FractalFormulaIterable):
def get_fractal_name(self):
return _FRACTAL_NAME
def __iter__(cls):
return ComplexPolynomialIterator(cls._z_values_range, cls._c_values_range,
cls._formula_params, cls._max_iterations)
class ComplexPolynomialIterator(FractalFormulaIterator):
_formula_params = None
def __init__(self, z_values_range, c_values_range, formula_params, max_iterations = None):
super().__init__(z_values_range, c_values_range, max_iterations)
self._formula_params = formula_params
def __next__(cls):
super().__next__()
if len(cls._z_values) < 1:
return None
formula_params = cls._formula_params
z_values_new = evaluate_polynomial_1d(formula_params.coefficient_array,
cls._z_values,
cls._c_values)
exploded_indexes = numpy.abs(z_values_new) > formula_params.escape_value
remaining_indexes = ~exploded_indexes
reduced_arrays = remove_indexes([z_values_new, cls._c_values], remaining_indexes)
cls._z_values, cls._c_values = reduced_arrays
cls._next_iteration += 1
return ComplexPolynomialIterationData(z_values_new, exploded_indexes, remaining_indexes)
```
#### File: fractimation/renderers/cached_image_renderer.py
```python
import numpy
from .base.cached_renderer import CachedRenderer
from ..data_models.image_params import ImageParams
from ..helpers.list_tools import update_indexes_with_value, remove_indexes
_IMAGE_ORIGIN = "upper"
class CachedImageRenderer(CachedRenderer):
_dimension_params = None
_image_params = None
_image_array = None
_image_canvas = None
def __init__(self, image_axes, fractal_iterable, dimension_params, image_params=None):
super().__init__(image_axes)
if image_params is None:
image_params = ImageParams()
self._dimension_params = dimension_params
self._image_params = image_params
temp_image = numpy.zeros([self._dimension_params.width, self._dimension_params.height],
dtype=int)
self._image_canvas = self._render_axes.imshow(temp_image.T, cmap=self._image_params.color_map)
self.initialize(fractal_iterable)
def initialize(self, fractal_iterable):
super().initialize(fractal_iterable)
image_array = numpy.zeros([self._dimension_params.width, self._dimension_params.height],
dtype=int)
image_array = numpy.add(image_array, self._image_params.initial_value)
self._image_array = image_array
initial_image = numpy.copy(self._image_array)
rotated_image = initial_image.T
self._render_cache.append(rotated_image)
self._image_canvas.set_data(rotated_image)
self._image_canvas.autoscale()
def render_to_canvas(self, frame_num, canvas):
if frame_num >= len(self._render_cache):
for frame_counter in range(len(self._render_cache), frame_num + 1):
self.render_to_cache()
frame_image = self._render_cache[frame_num]
self._image_canvas.set_data(frame_image)
self._image_canvas.autoscale()
def render_to_cache(self):
iteration_data = self._fractal_iterator.__next__()
frame_num = len(self._render_cache)
if iteration_data is None:
last_image = self._render_cache[-1]
self._render_cache.append(last_image)
else:
dimension_params = self._dimension_params
exploded_indexes = iteration_data.exploded_indexes
exploded_x_indexes = dimension_params.x_indexes[exploded_indexes]
exploded_y_indexes = dimension_params.y_indexes[exploded_indexes]
self._image_array[exploded_x_indexes, exploded_y_indexes] = frame_num
if self._image_params.recolor_image:
final_image = update_indexes_with_value(self._image_array,
self._image_params.initial_value,
frame_num + 1)
else:
final_image = numpy.copy(self._image_array)
rotated_image = final_image.T
self._render_cache.append(rotated_image)
reducable_arrays = [dimension_params.x_indexes, dimension_params.y_indexes]
remaining_indexes = iteration_data.remaining_indexes
reduced_arrays = remove_indexes(reducable_arrays, remaining_indexes)
dimension_params.x_indexes = reduced_arrays[0]
dimension_params.y_indexes = reduced_arrays[1]
```
#### File: fractimation/renderers/cached_patch_collection_renderer.py
```python
from abc import ABC, abstractmethod
from .cached_renderer import CachedRenderer
class CachedPatchCollectionRenderer(CachedRenderer, ABC):
"""Base class for Fratal Renderers using Matpotlib PatchCollections for rendering"""
_cache_added_to_axes = False
def initialize(self):
super().initialize()
self._cache_added_to_axes = False
@abstractmethod
def iterate(self):
super().iterate()
@abstractmethod
def preheat_render_cache(self, max_iterations):
super().preheat_render_cache(max_iterations)
self._cache_added_to_axes = False
def render(self, frame_num, axes):
if not self._cache_added_to_axes:
for frame_counter in range(0, len(self._render_cache)):
frame_patches = self._render_cache[frame_counter]
axes.add_collection(frame_patches)
self._cache_added_to_axes = True
if frame_num >= len(self._render_cache):
for frame_counter in range(self._next_iteration_index, frame_num + 1):
self.iterate()
frame_patches = self._render_cache[frame_counter]
axes.add_collection(frame_patches)
for frame_counter in range(0, len(self._render_cache)):
frame_patches = self._render_cache[frame_counter]
frame_patches.set_visible(frame_counter <= frame_num)
``` |
{
"source": "Jman420/plotplayer",
"score": 3
} |
#### File: plotplayer/helpers/ui_helper.py
```python
from tkinter.filedialog import asksaveasfilename
from matplotlib import pyplot
ALL_FILES_EXTENSION = '*.*'
ALL_FILES_TYPE = ['All Files', ALL_FILES_EXTENSION]
def get_save_dialog_result(title, default_file_name, file_types=None,
default_extension=ALL_FILES_EXTENSION):
"""
Displays a Save File Dialog and returns the resulting file name
"""
if file_types is None:
file_types = [ALL_FILES_TYPE]
save_file_name = asksaveasfilename(title=title, filetypes=file_types,
defaultextension=default_extension,
initialfile=default_file_name)
return save_file_name
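# Illustrative usage sketch (the file types shown here are hypothetical, not part of the module):
#
#     file_name = get_save_dialog_result('Save video', 'animation.mp4',
#                                        [['MP4 Video', '*.mp4'], ALL_FILES_TYPE],
#                                        '.mp4')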
def show_players(blocking=True):
"""
Shows all Pyplot figures. Wraps the call in a try/except block to avoid crashes.
"""
try:
pyplot.show(blocking)
except AttributeError:
print('Plotplayer encountered a playback error.')
print('This is usually due to a plotplayer window getting closed ' +
'during animation playback...')
print('This causes all open plotplayer windows to malfunction.')
print('Closing all plotplayer windows...')
pyplot.close('all')
```
#### File: plotplayer/managers/animation_manager.py
```python
from matplotlib.animation import FuncAnimation
from ..helpers import ui_helper, file_helper
VIDEO_EXTENSION = '.mp4'
HTML_EXTENSION = '.html'
JAVASCRIPT_EXTENSION = '.js.html'
SAVE_DIALOG_TITLE = 'Select video file to save'
VIDEO_FILE_TYPE = ['MP4 Video', '*{}'.format(VIDEO_EXTENSION)]
HTML_FILE_TYPE = ['HTML File', '*{}'.format(HTML_EXTENSION)]
JAVASCRIPT_FILE_TYPE = ['Javascript HTML File', '*{}'.format(JAVASCRIPT_EXTENSION)]
class AnimationManager(object):
"""
Animation Manager for PlotPlayer Windows
Public Methods:
* initialize - Initialize the Animation Manager for playback
* render - Render a specific frame of the associated animation
* play - Begin playback from the current position; restarts from beginning if current
position is the last frame
* stop - Stop playback at its current position
* toggle_playback - Toggle between play and stop states
* get_frame_number - Returns the current frame number
* get_total_frames - Returns the total number of frames in the current animation
* get_html - Returns the current animation in HTML5 Video
* get_javascript - Returns the current animation in Javascript Video
* save_video - Saves the current animation to file as Video
* save_html - Saves the current animation to file as HTML5 Video
* save_javascript - Saves the current animation to file as Javascript Video
"""
_figure = None
_render_handler = None
_frame_num = None
_animation_params = None
_animation = None
_playing = False
def __init__(self, figure, render_handler):
"""
Constructor
Parameters:
* figure - Instance of Pyplot figure associated with the draw canvas
* render_handler - Instance of RenderManager associated with the current animation
"""
self._figure = figure
self._render_handler = render_handler
def initialize(self, animation_params):
"""
Initialize the Animation Manager for Playback
Parameters:
* animation_params - Instance of AnimationParams
"""
self.stop()
self._animation_params = animation_params
self._frame_num = 0
self._playing = False
def render(self, frame_num):
"""
Render a specific frame of the animation
Parameters:
* frame_num - The frame number to render
"""
if frame_num < self._animation_params.min_frame_number:
frame_num = self._animation_params.min_frame_number
elif frame_num > self._animation_params.max_frame_number:
frame_num = self._animation_params.max_frame_number
self._frame_num = int(round(frame_num))
total_frames = self.get_total_frames()
self._render_handler.render(self._frame_num, total_frames)
if self._frame_num == self._animation_params.max_frame_number:
self._playing = False
def play(self):
"""
Begin playback from the current frame; restart playback from beginning if at the end
"""
if self._playing:
return
if self._frame_num == self._animation_params.max_frame_number:
self._frame_num = 0
frames_to_play = range(self._frame_num, self._animation_params.max_frame_number + 1)
animation = FuncAnimation(self._figure, self.render, frames_to_play,
interval=1000 // self._animation_params.frame_rate, repeat=False)
self._playing = True
self._animation = animation
self._figure.canvas.draw()
def stop(self):
"""
Stop playback at the current position
"""
if not self._playing:
return
self._animation.event_source.stop()
self._playing = False
def toggle_playback(self):
"""
Toggle between play and stop states
"""
if self._playing:
self.stop()
else:
self.play()
def get_frame_number(self):
"""
Returns the current frame number
"""
return self._frame_num
def get_min_frame_number(self):
"""
Returns the minimum frame number in the current animation
"""
return self._animation_params.min_frame_number
def get_max_frame_number(self):
"""
Returns the maximum frame number in the current animation
"""
return self._animation_params.max_frame_number
def get_total_frames(self):
total_frames = (self._animation_params.max_frame_number -
self._animation_params.min_frame_number)
return total_frames
def get_html(self):
"""
Returns the current animation in HTML5 Video format
"""
html = self._animation.to_html5_video()
return html
def get_javascript(self):
"""
Returns the current animation in Javascript Video format
"""
javascript = self._animation.to_jshtml()
return javascript
def save_video(self, file_name=None, writer=None):
"""
Saves the current animation to file as Video
Parameters:
* file_name (optional) - Indicates the file name to write the video to; will prompt
if omitted
* writer (optional) - Specifies the video writer for Matplotlib to use to write the video
"""
self.stop()
if file_name is None:
file_types = [VIDEO_FILE_TYPE, ui_helper.ALL_FILES_TYPE]
animation_name = self._animation_params.animation_name
file_name = ui_helper.get_save_dialog_result(SAVE_DIALOG_TITLE,
animation_name + VIDEO_EXTENSION,
file_types, VIDEO_EXTENSION)
self._animation.save(file_name, writer)
def save_html(self, file_name=None):
"""
Saves the current animation to file as HTML5 Video
Parameters:
* file_name (optional) - Indicates the file name to write the video to; will prompt
if omitted
"""
self.stop()
if file_name is None:
file_types = [HTML_FILE_TYPE, ui_helper.ALL_FILES_TYPE]
animation_name = self._animation_params.animation_name
file_name = ui_helper.get_save_dialog_result(SAVE_DIALOG_TITLE,
animation_name + HTML_EXTENSION,
file_types, HTML_EXTENSION)
video_html = self.get_html()
file_helper.save_file(file_name, video_html)
def save_javascript(self, file_name=None):
"""
Saves the current animation to file as Javascript Video
Parameters:
* file_name (optional) - Indicates the file name to write the video to; will prompt
if omitted
"""
self.stop()
if file_name is None:
default_file_name = self._animation_params.animation_name + JAVASCRIPT_EXTENSION
file_types = [JAVASCRIPT_FILE_TYPE, ui_helper.ALL_FILES_TYPE]
file_name = ui_helper.get_save_dialog_result(SAVE_DIALOG_TITLE, default_file_name,
file_types, JAVASCRIPT_EXTENSION)
video_javascript = self.get_javascript()
file_helper.save_file(file_name, video_javascript)
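# Rough usage sketch (illustrative; the figure, RenderManager instance and
# AnimationParams instance are assumed to come from the surrounding package):
#
#     manager = AnimationManager(figure, render_handler)
#     manager.initialize(animation_params)
#     manager.play()                  # start playback via FuncAnimation
#     manager.toggle_playback()       # pause / resume
#     manager.save_video('out.mp4')   # or save_html() / save_javascript()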
``` |
{
"source": "JManAn/Ingener-a-de-Software-TF",
"score": 2
} |
#### File: T PC Windows Version/renpy/atl.py
```python
from __future__ import print_function
import renpy.display
import renpy.pyanalysis
import random
def compiling(loc):
file, number = loc # @ReservedAssignment
renpy.game.exception_info = "Compiling ATL code at %s:%d" % (file, number)
def executing(loc):
file, number = loc # @ReservedAssignment
renpy.game.exception_info = "Executing ATL code at %s:%d" % (file, number)
# A map from the name of a time warp function to the function itself.
warpers = { }
def atl_warper(f):
name = f.func_name
warpers[name] = f
return f
# The pause warper is used internally when no other warper is
# specified.
@atl_warper
def pause(t):
if t < 1.0:
return 0.0
else:
return 1.0
@atl_warper
def instant(t):
return 1.0
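# Additional warpers are registered the same way; a hypothetical example
# (not part of this module -- Ren'Py's extra warpers live in script files):
#
#     @atl_warper
#     def ease_quad(t):
#         return t * t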
position = renpy.object.Sentinel("position")
def any_object(x):
return x
def bool_or_none(x):
if x is None:
return x
return bool(x)
def float_or_none(x):
if x is None:
return x
return float(x)
# A dictionary giving property names and the corresponding default
# values.
PROPERTIES = {
"pos" : (position, position),
"xpos" : position,
"ypos" : position,
"anchor" : (position, position),
"xanchor" : position,
"yanchor" : position,
"xaround" : position,
"yaround" : position,
"xanchoraround" : float,
"yanchoraround" : float,
"align" : (float, float),
"xalign" : float,
"yalign" : float,
"rotate" : float,
"rotate_pad" : bool,
"transform_anchor" : bool,
"xzoom" : float,
"yzoom" : float,
"zoom" : float,
"nearest" : bool_or_none,
"alpha" : float,
"additive" : float,
"around" : (position, position),
"alignaround" : (float, float),
"angle" : float,
"radius" : float,
"crop" : (float, float, float, float),
"crop_relative" : bool,
"size" : (int, int),
"maxsize" : (int, int),
"corner1" : (float, float),
"corner2" : (float, float),
"subpixel" : bool,
"delay" : float,
"xoffset" : float,
"yoffset" : float,
"offset" : (int, int),
"xcenter" : position,
"ycenter" : position,
"debug" : any_object,
"events" : bool,
"xpan" : float_or_none,
"ypan" : float_or_none,
"xtile" : int,
"ytile" : int,
}
def correct_type(v, b, ty):
"""
Corrects the type of v to match ty. b is used to inform the match.
"""
if ty is position:
if v is None:
return None
else:
return type(b)(v)
else:
return ty(v)
def interpolate(t, a, b, type): # @ReservedAssignment
"""
Linearly interpolate the arguments.
"""
# Recurse into tuples.
if isinstance(b, tuple):
if a is None:
a = [ None ] * len(b)
return tuple(interpolate(t, i, j, ty) for i, j, ty in zip(a, b, type))
# Deal with booleans, nones, etc.
elif b is None or isinstance(b, (bool, basestring)):
if t >= 1.0:
return b
else:
return a
# Interpolate everything else.
else:
if a is None:
a = 0
return correct_type(a + t * (b - a), b, type)
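# For example, interpolate(0.25, 0.0, 8.0, float) returns 2.0, and
# interpolate(0.25, (0, 0), (8, 4), (int, int)) returns (2, 1).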
# Interpolate the value of a spline. This code is based on Aenakume's code,
# from 00splines.rpy.
def interpolate_spline(t, spline):
if isinstance(spline[-1], tuple):
return tuple(interpolate_spline(t, i) for i in zip(*spline))
if spline[0] is None:
return spline[-1]
if len(spline) == 2:
t_p = 1.0 - t
rv = t_p * spline[0] + t * spline[-1]
elif len(spline) == 3:
t_pp = (1.0 - t)**2
t_p = 2 * t * (1.0 - t)
t2 = t**2
rv = t_pp * spline[0] + t_p * spline[1] + t2 * spline[2]
elif len(spline) == 4:
t_ppp = (1.0 - t)**3
t_pp = 3 * t * (1.0 - t)**2
t_p = 3 * t**2 * (1.0 - t)
t3 = t**3
rv = t_ppp * spline[0] + t_pp * spline[1] + t_p * spline[2] + t3 * spline[3]
else:
raise Exception("ATL can't interpolate splines of length %d." % len(spline))
return correct_type(rv, spline[-1], position)
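# Worked example for the quadratic case: interpolate_spline(0.5, [0.0, 10.0, 20.0])
# evaluates 0.25 * 0.0 + 0.5 * 10.0 + 0.25 * 20.0 = 10.0, a point on the
# quadratic Bezier curve defined by those control values.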
# A list of atl transforms that may need to be compile.
compile_queue = [ ]
def compile_all():
"""
Called after the init phase is finished, to compile all queued transforms
whose ATL is constant.
"""
global compile_queue
for i in compile_queue:
if i.atl.constant == GLOBAL_CONST:
i.compile()
compile_queue = [ ]
# This is the context used when compiling an ATL statement. It stores the
# scopes that are used to evaluate the various expressions in the statement,
# and has a method to do the evaluation and return a result.
class Context(object):
def __init__(self, context):
self.context = context
def eval(self, expr): # @ReservedAssignment
expr = renpy.python.escape_unicode(expr)
return eval(expr, renpy.store.__dict__, self.context) # @UndefinedVariable
def __eq__(self, other):
if not isinstance(other, Context):
return False
return self.context == other.context
def __ne__(self, other):
return not (self == other)
# This is intended to be subclassed by ATLTransform. It takes care of
# managing ATL execution, which allows ATLTransform itself to not care
# much about the contents of this file.
class ATLTransformBase(renpy.object.Object):
# Compatibility with older saves.
parameters = renpy.ast.ParameterInfo([ ], [ ], None, None)
parent_transform = None
atl_st_offset = 0
# The block, as first compiled for prediction.
predict_block = None
nosave = [ 'parent_transform' ]
def __init__(self, atl, context, parameters):
# The constructor will be called by atltransform.
if parameters is None:
parameters = ATLTransformBase.parameters
# The parameters that we take.
self.parameters = parameters
# The raw code that makes up this ATL statement.
self.atl = atl
# The context in which execution occurs.
self.context = Context(context)
# The code after it has been compiled into a block.
self.block = None
# The same thing, but only if the code was compiled into a block
# for prediction purposes only.
self.predict_block = None
# The properties of the block, if it contains only an
# Interpolation.
self.properties = None
# The state of the statement we are executing. As this can be
# shared between more than one object (in the case of a hide),
# the data must not be altered.
self.atl_state = None
# Are we done?
self.done = False
# The transform event we are going to process.
self.transform_event = None
# The transform event we last processed.
self.last_transform_event = None
# The child transform event we last processed.
self.last_child_transform_event = None
# The child, without any transformations.
self.raw_child = None
# The parent transform that was called to create this transform.
self.parent_transform = None
# The offset between st and when this ATL block first executed.
self.atl_st_offset = 0
if renpy.game.context().init_phase:
compile_queue.append(self)
def _handles_event(self, event):
if (self.block is not None) and (self.block._handles_event(event)):
return True
if self.child is None:
return False
return self.child._handles_event(event)
def get_block(self):
"""
Returns the compiled block to use.
"""
if self.block:
return self.block
elif self.predict_block and renpy.display.predict.predicting:
return self.predict_block
else:
return None
def take_execution_state(self, t):
"""
Updates self to begin executing from the same point as t. This
requires that t.atl is self.atl.
"""
super(ATLTransformBase, self).take_execution_state(t)
self.atl_st_offset = None
if self is t:
return
elif not isinstance(t, ATLTransformBase):
return
elif t.atl is not self.atl:
return
# Important to do it this way, so we use __eq__. The exception handling
# optimistically assumes that uncomparable objects are the same.
try:
if not (t.context == self.context):
return
except:
pass
self.done = t.done
self.block = t.block
self.atl_state = t.atl_state
self.transform_event = t.transform_event
self.last_transform_event = t.last_transform_event
self.last_child_transform_event = t.last_child_transform_event
self.st = t.st
self.at = t.at
self.st_offset = t.st_offset
self.at_offset = t.at_offset
self.atl_st_offset = t.atl_st_offset
if self.child is renpy.display.motion.null:
self.child = t.child
self.raw_child = t.raw_child
def __call__(self, *args, **kwargs):
_args = kwargs.pop("_args", None)
context = self.context.context.copy()
for k, v in self.parameters.parameters:
if v is not None:
context[k] = renpy.python.py_eval(v)
positional = list(self.parameters.positional)
args = list(args)
child = None
if not positional and args:
child = args.pop(0)
# Handle positional arguments.
while positional and args:
name = positional.pop(0)
value = args.pop(0)
if name in kwargs:
raise Exception('Parameter %r is used as both a positional and keyword argument to a transition.' % name)
context[name] = value
if args:
raise Exception("Too many arguments passed to ATL transform.")
# Handle keyword arguments.
for k, v in kwargs.iteritems():
if k in positional:
positional.remove(k)
context[k] = v
elif k in context:
context[k] = v
elif k == 'child':
child = v
else:
raise Exception('Parameter %r is not known by ATL Transform.' % k)
if child is None:
child = self.child
# Create a new ATL Transform.
parameters = renpy.ast.ParameterInfo({ }, positional, None, None)
rv = renpy.display.motion.ATLTransform(
atl=self.atl,
child=child,
style=self.style_arg,
context=context,
parameters=parameters,
_args=_args,
)
rv.parent_transform = self
rv.take_state(self)
return rv
def compile(self): # @ReservedAssignment
"""
Compiles the ATL code into a block. As necessary, updates the
properties.
"""
constant = (self.atl.constant == GLOBAL_CONST)
if not constant:
for p in self.parameters.positional:
if p not in self.context.context:
raise Exception("Cannot compile ATL Transform at %s:%d, as it's missing positional parameter %s." % (
self.atl.loc[0],
self.atl.loc[1],
self.parameters.positional[0],
))
if constant and self.parent_transform:
if self.parent_transform.block:
self.block = self.parent_transform.block
self.properties = self.parent_transform.properties
self.parent_transform = None
return self.block
old_exception_info = renpy.game.exception_info
block = self.atl.compile(self.context)
if all(
isinstance(statement, Interpolation) and statement.duration == 0
for statement in block.statements
):
self.properties = []
for interp in block.statements:
self.properties.extend(interp.properties)
if not constant and renpy.display.predict.predicting:
self.predict_block = block
else:
self.block = block
self.predict_block = None
renpy.game.exception_info = old_exception_info
if constant and self.parent_transform:
self.parent_transform.block = self.block
self.parent_transform.properties = self.properties
self.parent_transform = None
return block
def execute(self, trans, st, at):
if self.done:
return None
block = self.get_block()
if block is None:
block = self.compile()
events = [ ]
# Hide request.
if trans.hide_request:
self.transform_event = "hide"
if trans.replaced_request:
self.transform_event = "replaced"
# Notice transform events.
if renpy.config.atl_multiple_events:
if self.transform_event != self.last_transform_event:
events.append(self.transform_event)
self.last_transform_event = self.transform_event
# Propagate transform_events from children.
if (self.child is not None) and self.child.transform_event != self.last_child_transform_event:
self.last_child_transform_event = self.child.transform_event
if self.child.transform_event is not None:
self.transform_event = self.child.transform_event
# Notice transform events, again.
if self.transform_event != self.last_transform_event:
events.append(self.transform_event)
self.last_transform_event = self.transform_event
if self.transform_event in renpy.config.repeat_transform_events:
self.transform_event = None
self.last_transform_event = None
old_exception_info = renpy.game.exception_info
if (self.atl_st_offset is None) or (st - self.atl_st_offset) < 0:
self.atl_st_offset = st
if self.atl.animation:
timebase = at
else:
timebase = st - self.atl_st_offset
action, arg, pause = block.execute(trans, timebase, self.atl_state, events)
renpy.game.exception_info = old_exception_info
if action == "continue" and not renpy.display.predict.predicting:
self.atl_state = arg
else:
self.done = True
return pause
def predict_one(self):
self.atl.predict(self.context)
def visit(self):
block = self.get_block()
if block is None:
block = self.compile()
return self.children + block.visit()
# This is used in mark_constant to analyze expressions for constness.
is_constant_expr = renpy.pyanalysis.Analysis().is_constant_expr
GLOBAL_CONST = renpy.pyanalysis.GLOBAL_CONST
# The base class for raw ATL statements.
class RawStatement(object):
constant = None
def __init__(self, loc):
super(RawStatement, self).__init__()
self.loc = loc
# Compiles this RawStatement into a Statement, by using ctx to
# evaluate expressions as necessary.
def compile(self, ctx): # @ReservedAssignment
raise Exception("Compile not implemented.")
# Predicts the images used by this statement.
def predict(self, ctx):
return
def mark_constant(self):
"""
Sets self.constant to true if all expressions used in this statement
and its children are constant.
"""
self.constant = 0
# The base class for compiled ATL Statements.
class Statement(renpy.object.Object):
def __init__(self, loc):
super(Statement, self).__init__()
self.loc = loc
# trans is the transform we're working on.
# st is the time since this statement started executing.
# state is the state stored by this statement, or None if
# we've just started executing this statement.
# event is an event we're triggering.
#
# "continue", state, pause - Causes this statement to execute
# again, with the given state passed in the second time around.
#
#
# "next", timeleft, pause - Causes the next statement to execute,
# with timeleft being the amount of time left after this statement
# finished.
#
# "event", (name, timeleft), pause - Causes an event to be reported,
# and control to head up to the event handler.
#
# "repeat", (count, timeleft), pause - Causes the repeat behavior
# to occur.
#
# As the Repeat statement can only appear in a block, only Block
# needs to deal with the repeat behavior.
#
# Pause is the amount of time until execute should be called again,
# or None if there's no need to call execute ever again.
def execute(self, trans, st, state, events):
raise Exception("Not implemented.")
# Return a list of displayable children.
def visit(self):
return [ ]
# Does this respond to an event?
def _handles_event(self, event):
return False
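# For illustration only (not part of this module), a statement obeying the
# contract described above might look like:
#
#     class FixedDelay(Statement):
#         def __init__(self, loc, duration):
#             super(FixedDelay, self).__init__(loc)
#             self.duration = duration
#         def execute(self, trans, st, state, events):
#             if st < self.duration:
#                 # Not done: run again, keep no state, wake when the delay ends.
#                 return "continue", None, self.duration - st
#             # Done: hand the leftover time to the next statement.
#             return "next", st - self.duration, None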
# This represents a Raw ATL block.
class RawBlock(RawStatement):
# Should we use the animation timebase or the showing timebase?
animation = False
def __init__(self, loc, statements, animation):
super(RawBlock, self).__init__(loc)
# A list of RawStatements in this block.
self.statements = statements
self.animation = animation
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
statements = [ i.compile(ctx) for i in self.statements ]
return Block(self.loc, statements)
def predict(self, ctx):
for i in self.statements:
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.statements:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
# A compiled ATL block.
class Block(Statement):
def __init__(self, loc, statements):
super(Block, self).__init__(loc)
# A list of statements in the block.
self.statements = statements
# The start times of various statements.
self.times = [ ]
for i, s in enumerate(statements):
if isinstance(s, Time):
self.times.append((s.time, i + 1))
self.times.sort()
def _handles_event(self, event):
for i in self.statements:
if i._handles_event(event):
return True
return False
def execute(self, trans, st, state, events):
executing(self.loc)
# Unpack the state.
if state is not None:
index, start, loop_start, repeats, times, child_state = state
else:
index, start, loop_start, repeats, times, child_state = 0, 0, 0, 0, self.times[:], None
# What we might be returning.
action = "continue"
arg = None
pause = None
while action == "continue":
# Target is the time we're willing to execute to.
# Max_pause is how long we'll wait before executing again.
# If we have times queued up, then use them to inform target
# and time.
if times:
time, tindex = times[0]
target = min(time, st)
max_pause = time - target
# Otherwise, take the defaults.
else:
target = st
max_pause = 15
while True:
# If we've hit the last statement, it's the end of
# this block.
if index >= len(self.statements):
return "next", target - start, None
# Find the statement and try to run it.
stmt = self.statements[index]
action, arg, pause = stmt.execute(trans, target - start, child_state, events)
# On continue, persist our state.
if action == "continue":
if pause is None:
pause = max_pause
action, arg, pause = "continue", (index, start, loop_start, repeats, times, arg), min(max_pause, pause)
break
elif action == "event":
return action, arg, pause
# On next, advance to the next statement in the block.
elif action == "next":
index += 1
start = target - arg
child_state = None
# On repeat, either terminate the block, or go to
# the first statement.
elif action == "repeat":
count, arg = arg
loop_end = target - arg
duration = loop_end - loop_start
if duration <= 0:
raise Exception("ATL appears to be in an infinite loop.")
# Figure how many durations can occur between the
# start of the loop and now.
new_repeats = int((target - loop_start) / duration)
if count is not None:
if repeats + new_repeats >= count:
new_repeats = count - repeats
loop_start += new_repeats * duration
return "next", target - loop_start, None
repeats += new_repeats
loop_start = loop_start + new_repeats * duration
start = loop_start
index = 0
child_state = None
if times:
time, tindex = times[0]
if time <= target:
times.pop(0)
index = tindex
start = time
child_state = None
continue
return action, arg, pause
def visit(self):
return [ j for i in self.statements for j in i.visit() ]
# This can become one of four things:
#
# - A pause.
# - An interpolation (which optionally can also reference other
# blocks, as long as they're not time-dependent, and have the same
# arity as the interpolation).
# - A call to another block.
# - A command to change the image, perhaps with a transition.
#
# We won't decide which it is until runtime, as we need the
# values of the variables here.
class RawMultipurpose(RawStatement):
warp_function = None
def __init__(self, loc):
super(RawMultipurpose, self).__init__(loc)
self.warper = None
self.duration = None
self.properties = [ ]
self.expressions = [ ]
self.splines = [ ]
self.revolution = None
self.circles = "0"
def add_warper(self, name, duration, warp_function):
self.warper = name
self.duration = duration
self.warp_function = warp_function
def add_property(self, name, exprs):
self.properties.append((name, exprs))
def add_expression(self, expr, with_clause):
self.expressions.append((expr, with_clause))
def add_revolution(self, revolution):
self.revolution = revolution
def add_circles(self, circles):
self.circles = circles
def add_spline(self, name, exprs):
self.splines.append((name, exprs))
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
# Figure out what kind of statement we have. If there's no
# interpolator, and no properties, then we have either a
# call, or a child statement.
if (self.warper is None and
self.warp_function is None and
not self.properties and
not self.splines and
len(self.expressions) == 1):
expr, withexpr = self.expressions[0]
child = ctx.eval(expr)
if withexpr:
transition = ctx.eval(withexpr)
else:
transition = None
if isinstance(child, (int, float)):
return Interpolation(self.loc, "pause", child, [ ], None, 0, [ ])
child = renpy.easy.displayable(child)
if isinstance(child, ATLTransformBase):
child.compile()
return child.get_block()
else:
return Child(self.loc, child, transition)
compiling(self.loc)
# Otherwise, we probably have an interpolation statement.
if self.warp_function:
warper = ctx.eval(self.warp_function)
else:
warper = self.warper or "instant"
if warper not in warpers:
raise Exception("ATL Warper %s is unknown at runtime." % warper)
properties = [ ]
for name, expr in self.properties:
if name not in PROPERTIES:
raise Exception("ATL Property %s is unknown at runtime." % property)
value = ctx.eval(expr)
properties.append((name, value))
splines = [ ]
for name, exprs in self.splines:
if name not in PROPERTIES:
raise Exception("ATL Property %s is unknown at runtime." % property)
values = [ ctx.eval(i) for i in exprs ]
splines.append((name, values))
for expr, _with in self.expressions:
try:
value = ctx.eval(expr)
except:
raise Exception("Could not evaluate expression %r when compiling ATL." % expr)
if not isinstance(value, ATLTransformBase):
raise Exception("Expression %r is not an ATL transform, and so cannot be included in an ATL interpolation." % expr)
value.compile()
if value.properties is None:
raise Exception("ATL transform %r is too complicated to be included in interpolation." % expr)
properties.extend(value.properties)
duration = ctx.eval(self.duration)
circles = ctx.eval(self.circles)
return Interpolation(self.loc, warper, duration, properties, self.revolution, circles, splines)
def mark_constant(self):
constant = GLOBAL_CONST
constant = min(constant, is_constant_expr(self.warp_function))
constant = min(constant, is_constant_expr(self.duration))
constant = min(constant, is_constant_expr(self.circles))
for _name, expr in self.properties:
constant = min(constant, is_constant_expr(expr))
for _name, exprs in self.splines:
for expr in exprs:
constant = min(constant, is_constant_expr(expr))
for expr, withexpr in self.expressions:
constant = min(constant, is_constant_expr(expr))
constant = min(constant, is_constant_expr(withexpr))
self.constant = constant
def predict(self, ctx):
for i, _j in self.expressions:
try:
i = ctx.eval(i)
except:
continue
if isinstance(i, ATLTransformBase):
i.atl.predict(ctx)
return
try:
renpy.easy.predict(i)
except:
continue
# This lets us have an ATL transform as our child.
class RawContainsExpr(RawStatement):
def __init__(self, loc, expr):
super(RawContainsExpr, self).__init__(loc)
self.expression = expr
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
child = ctx.eval(self.expression)
return Child(self.loc, child, None)
def mark_constant(self):
self.constant = is_constant_expr(self.expression)
# This allows us to have multiple ATL transforms as children.
class RawChild(RawStatement):
def __init__(self, loc, child):
super(RawChild, self).__init__(loc)
self.children = [ child ]
def compile(self, ctx): # @ReservedAssignment
children = [ ]
for i in self.children:
children.append(renpy.display.motion.ATLTransform(i, context=ctx.context))
box = renpy.display.layout.MultiBox(layout='fixed')
for i in children:
box.add(i)
return Child(self.loc, box, None)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.children:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
# This changes the child of this statement, optionally with a transition.
class Child(Statement):
def __init__(self, loc, child, transition):
super(Child, self).__init__(loc)
self.child = child
self.transition = transition
def execute(self, trans, st, state, events):
executing(self.loc)
old_child = trans.raw_child
child = self.child
if child._duplicatable:
child = self.child._duplicate(trans._args)
child._unique()
if (old_child is not None) and (old_child is not renpy.display.motion.null) and (self.transition is not None):
child = self.transition(old_widget=old_child,
new_widget=child)
child._unique()
else:
child = child
trans.set_child(child, duplicate=False)
trans.raw_child = self.child
return "next", st, None
def visit(self):
return [ self.child ]
# This causes interpolation to occur.
class Interpolation(Statement):
def __init__(self, loc, warper, duration, properties, revolution, circles, splines):
super(Interpolation, self).__init__(loc)
self.warper = warper
self.duration = duration
self.properties = properties
self.splines = splines
# The direction we revolve in: cw, ccw, or None.
self.revolution = revolution
# The number of complete circles we make.
self.circles = circles
def execute(self, trans, st, state, events):
executing(self.loc)
warper = warpers.get(self.warper, self.warper)
if (self.warper != "instant") and (state is None) and (
(trans.atl_state is not None) or (trans.st == 0)
):
first = True
else:
first = False
if self.duration:
complete = min(1.0, st / self.duration)
else:
complete = 1.0
if complete < 0.0:
complete = 0.0
elif complete > 1.0:
complete = 1.0
complete = warper(complete)
if state is None:
# Create a new transform state, and apply the property
# changes to it.
newts = renpy.display.motion.TransformState()
newts.take_state(trans.state)
has_angle = False
for k, v in self.properties:
setattr(newts, k, v)
if k == "angle":
newts.last_angle = v
has_angle = True
# Now, the things we change linearly are in the difference
# between the new and old states.
linear = trans.state.diff(newts)
revolution = None
splines = [ ]
revdir = self.revolution
circles = self.circles
if (revdir or (has_angle and renpy.config.automatic_polar_motion)) and (newts.xaround is not None):
# Remove various irrelevant motions.
for i in [ 'xpos', 'ypos',
'xanchor', 'yanchor',
'xaround', 'yaround',
'xanchoraround', 'yanchoraround',
]:
linear.pop(i, None)
if revdir is not None:
# Ensure we rotate around the new point.
trans.state.xaround = newts.xaround
trans.state.yaround = newts.yaround
trans.state.xanchoraround = newts.xanchoraround
trans.state.yanchoraround = newts.yanchoraround
# Get the start and end angles and radii.
startangle = trans.state.angle
endangle = newts.angle
startradius = trans.state.radius
endradius = newts.radius
# Make sure the revolution is in the appropriate direction,
# and contains an appropriate number of circles.
if revdir == "clockwise":
if endangle < startangle:
startangle -= 360
startangle -= circles * 360
elif revdir == "counterclockwise":
if endangle > startangle:
startangle += 360
startangle += circles * 360
# Store the revolution.
revolution = (startangle, endangle, startradius, endradius)
else:
last_angle = trans.state.last_angle or trans.state.angle
revolution = (last_angle, newts.last_angle, trans.state.radius, newts.radius)
# Figure out the splines.
for name, values in self.splines:
splines.append((name, [ getattr(trans.state, name) ] + values))
state = (linear, revolution, splines)
# Ensure that we set things, even if they don't actually
# change from the old state.
for k, v in self.properties:
if k not in linear:
setattr(trans.state, k, v)
else:
linear, revolution, splines = state
# Linearly interpolate between the things in linear.
for k, (old, new) in linear.iteritems():
value = interpolate(complete, old, new, PROPERTIES[k])
setattr(trans.state, k, value)
# Handle the revolution.
if revolution is not None:
startangle, endangle, startradius, endradius = revolution
angle = interpolate(complete, startangle, endangle, float)
trans.state.last_angle = angle
trans.state.angle = angle
trans.state.radius = interpolate(complete, startradius, endradius, float)
# Handle any splines we might have.
for name, values in splines:
value = interpolate_spline(complete, values)
setattr(trans.state, name, value)
if ((not first) or (not renpy.config.atl_one_frame)) and (st >= self.duration):
return "next", st - self.duration, None
else:
if not self.properties and not self.revolution and not self.splines:
return "continue", state, max(0, self.duration - st)
else:
return "continue", state, 0
# Implementation of the repeat statement.
class RawRepeat(RawStatement):
def __init__(self, loc, repeats):
super(RawRepeat, self).__init__(loc)
self.repeats = repeats
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
repeats = self.repeats
if repeats is not None:
repeats = ctx.eval(repeats)
return Repeat(self.loc, repeats)
def mark_constant(self):
self.constant = is_constant_expr(self.repeats)
class Repeat(Statement):
def __init__(self, loc, repeats):
super(Repeat, self).__init__(loc)
self.repeats = repeats
def execute(self, trans, st, state, events):
return "repeat", (self.repeats, st), 0
# Parallel statement.
class RawParallel(RawStatement):
def __init__(self, loc, block):
super(RawParallel, self).__init__(loc)
self.blocks = [ block ]
def compile(self, ctx): # @ReservedAssignment
return Parallel(self.loc, [i.compile(ctx) for i in self.blocks])
def predict(self, ctx):
for i in self.blocks:
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.blocks:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
class Parallel(Statement):
def __init__(self, loc, blocks):
super(Parallel, self).__init__(loc)
self.blocks = blocks
def _handles_event(self, event):
for i in self.blocks:
if i._handles_event(event):
return True
return False
def execute(self, trans, st, state, events):
executing(self.loc)
if state is None:
state = [ (i, None) for i in self.blocks ]
# The amount of time left after finishing this block.
left = [ ]
# The duration of the pause.
pauses = [ ]
# The new state structure.
newstate = [ ]
for i, istate in state:
action, arg, pause = i.execute(trans, st, istate, events)
if pause is not None:
pauses.append(pause)
if action == "continue":
newstate.append((i, arg))
elif action == "next":
left.append(arg)
elif action == "event":
return action, arg, pause
if newstate:
return "continue", newstate, min(pauses)
else:
return "next", min(left), None
def visit(self):
return [ j for i in self.blocks for j in i.visit() ]
# The choice statement.
class RawChoice(RawStatement):
def __init__(self, loc, chance, block):
super(RawChoice, self).__init__(loc)
self.choices = [ (chance, block) ]
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Choice(self.loc, [ (ctx.eval(chance), block.compile(ctx)) for chance, block in self.choices])
def predict(self, ctx):
for _i, j in self.choices:
j.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for _chance, block in self.choices:
block.mark_constant()
constant = min(constant, block.constant)
self.constant = constant
class Choice(Statement):
def __init__(self, loc, choices):
super(Choice, self).__init__(loc)
self.choices = choices
def _handles_event(self, event):
for i in self.choices:
if i[1]._handles_event(event):
return True
return False
def execute(self, trans, st, state, events):
executing(self.loc)
if state is None:
total = 0
for chance, choice in self.choices:
total += chance
n = random.uniform(0, total)
for chance, choice in self.choices:
if n < chance:
break
n -= chance
cstate = None
else:
choice, cstate = state
action, arg, pause = choice.execute(trans, st, cstate, events)
if action == "continue":
return "continue", (choice, arg), pause
else:
return action, arg, None
def visit(self):
return [ j for i in self.choices for j in i[1].visit() ]
# The Time statement.
class RawTime(RawStatement):
def __init__(self, loc, time):
super(RawTime, self).__init__(loc)
self.time = time
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Time(self.loc, ctx.eval(self.time))
def mark_constant(self):
self.constant = is_constant_expr(self.time)
class Time(Statement):
def __init__(self, loc, time):
super(Time, self).__init__(loc)
self.time = time
def execute(self, trans, st, state, events):
return "continue", None, None
# The On statement.
class RawOn(RawStatement):
def __init__(self, loc, names, block):
super(RawOn, self).__init__(loc)
self.handlers = { }
for i in names:
self.handlers[i] = block
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
handlers = { }
for k, v in self.handlers.iteritems():
handlers[k] = v.compile(ctx)
return On(self.loc, handlers)
def predict(self, ctx):
for i in self.handlers.itervalues():
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for block in self.handlers.itervalues():
block.mark_constant()
constant = min(constant, block.constant)
self.constant = constant
class On(Statement):
def __init__(self, loc, handlers):
super(On, self).__init__(loc)
self.handlers = handlers
def _handles_event(self, event):
if event in self.handlers:
return True
else:
return False
def execute(self, trans, st, state, events):
executing(self.loc)
# If it's our first time through, start in the start state.
if state is None:
name, start, cstate = ("start", st, None)
else:
name, start, cstate = state
# If we have an external event, and we have a handler for it,
# handle it.
for event in events:
if event in self.handlers:
# Do not allow people to abort the hide or replaced event.
lock_event = (name == "hide" and trans.hide_request) or (name == "replaced" and trans.replaced_request)
if not lock_event:
name = event
start = st
cstate = None
while True:
# If we don't have a handler, return until we change event.
if name not in self.handlers:
return "continue", (name, start, cstate), None
action, arg, pause = self.handlers[name].execute(trans, st - start, cstate, events)
# If we get a continue, save our state.
if action == "continue":
# If it comes from a hide block, indicate that.
if name == "hide" or name == "replaced":
trans.hide_response = False
trans.replaced_response = False
return "continue", (name, start, arg), pause
# If we get a next, then try going to the default
# event, unless we're already in default, in which case we
# go to None.
elif action == "next":
if name == "default" or name == "hide" or name == "replaced":
name = None
else:
name = "default"
start = st - arg
cstate = None
continue
# If we get an event, then either handle it if we can, or
# pass it up the stack if we can't.
elif action == "event":
name, arg = arg
if name in self.handlers:
start = max(st - arg, st - 30)
cstate = None
continue
return "event", (name, arg), None
def visit(self):
return [ j for i in self.handlers.itervalues() for j in i.visit() ]
# Event statement.
class RawEvent(RawStatement):
def __init__(self, loc, name):
super(RawEvent, self).__init__(loc)
self.name = name
def compile(self, ctx): # @ReservedAssignment
return Event(self.loc, self.name)
def mark_constant(self):
self.constant = GLOBAL_CONST
class Event(Statement):
def __init__(self, loc, name):
super(Event, self).__init__(loc)
self.name = name
def execute(self, trans, st, state, events):
return "event", (self.name, st), None
class RawFunction(RawStatement):
def __init__(self, loc, expr):
super(RawFunction, self).__init__(loc)
self.expr = expr
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Function(self.loc, ctx.eval(self.expr))
def mark_constant(self):
self.constant = is_constant_expr(self.expr)
class Function(Statement):
def __init__(self, loc, function):
super(Function, self).__init__(loc)
self.function = function
def _handles_event(self, event):
return True
def execute(self, trans, st, state, events):
fr = self.function(trans, st, trans.at)
if fr is not None:
return "continue", None, fr
else:
return "next", 0, None
# This parses an ATL block.
def parse_atl(l):
l.advance()
block_loc = l.get_location()
statements = [ ]
animation = False
while not l.eob:
loc = l.get_location()
if l.keyword('repeat'):
repeats = l.simple_expression()
statements.append(RawRepeat(loc, repeats))
elif l.keyword('block'):
l.require(':')
l.expect_eol()
l.expect_block('block')
block = parse_atl(l.subblock_lexer())
statements.append(block)
elif l.keyword('contains'):
expr = l.simple_expression()
if expr:
l.expect_noblock('contains expression')
statements.append(RawContainsExpr(loc, expr))
else:
l.require(':')
l.expect_eol()
l.expect_block('contains')
block = parse_atl(l.subblock_lexer())
statements.append(RawChild(loc, block))
elif l.keyword('parallel'):
l.require(':')
l.expect_eol()
l.expect_block('parallel')
block = parse_atl(l.subblock_lexer())
statements.append(RawParallel(loc, block))
elif l.keyword('choice'):
chance = l.simple_expression()
if not chance:
chance = "1.0"
l.require(':')
l.expect_eol()
l.expect_block('choice')
block = parse_atl(l.subblock_lexer())
statements.append(RawChoice(loc, chance, block))
elif l.keyword('on'):
names = [ l.require(l.word) ]
while l.match(','):
name = l.word()
if name is None:
break
names.append(name)
l.require(':')
l.expect_eol()
l.expect_block('on')
block = parse_atl(l.subblock_lexer())
statements.append(RawOn(loc, names, block))
elif l.keyword('time'):
time = l.require(l.simple_expression)
l.expect_noblock('time')
statements.append(RawTime(loc, time))
elif l.keyword('function'):
expr = l.require(l.simple_expression)
l.expect_noblock('function')
statements.append(RawFunction(loc, expr))
elif l.keyword('event'):
name = l.require(l.word)
l.expect_noblock('event')
statements.append(RawEvent(loc, name))
elif l.keyword('pass'):
l.expect_noblock('pass')
statements.append(None)
elif l.keyword('animation'):
l.expect_noblock('animation')
animation = True
else:
# If we can't assign it to a statement more specifically,
# we try to parse it into a RawMultipurpose. That will
# then be turned into another statement, as appropriate.
# The RawMultipurpose we add things to.
rm = renpy.atl.RawMultipurpose(loc)
# Is the last clause an expression?
last_expression = False
# Is this clause an expression?
this_expression = False
# First, look for a warper.
cp = l.checkpoint()
warper = l.name()
if warper in warpers:
duration = l.require(l.simple_expression)
warp_function = None
elif warper == "warp":
warper = None
warp_function = l.require(l.simple_expression)
duration = l.require(l.simple_expression)
else:
l.revert(cp)
warper = None
warp_function = None
duration = "0"
rm.add_warper(warper, duration, warp_function)
# Now, look for properties and simple_expressions.
while True:
# Update expression status.
last_expression = this_expression
this_expression = False
if l.keyword('pass'):
continue
# Parse revolution keywords.
if l.keyword('clockwise'):
rm.add_revolution('clockwise')
continue
if l.keyword('counterclockwise'):
rm.add_revolution('counterclockwise')
continue
if l.keyword('circles'):
expr = l.require(l.simple_expression)
rm.add_circles(expr)
# Try to parse a property.
cp = l.checkpoint()
prop = l.name()
if prop in PROPERTIES:
expr = l.require(l.simple_expression)
# We either have a property or a spline. It's the
# presence of knots that determines which one it is.
knots = [ ]
while l.keyword('knot'):
knots.append(l.require(l.simple_expression))
if knots:
knots.append(expr)
rm.add_spline(prop, knots)
else:
rm.add_property(prop, expr)
continue
# Otherwise, try to parse it as a simple expression,
# with an optional with clause.
l.revert(cp)
expr = l.simple_expression()
if not expr:
break
if last_expression:
l.error('ATL statement contains two expressions in a row; is one of them a misspelled property? If not, separate them with pass.')
this_expression = True
if l.keyword("with"):
with_expr = l.require(l.simple_expression)
else:
with_expr = None
rm.add_expression(expr, with_expr)
l.expect_noblock('ATL')
statements.append(rm)
if l.eol():
l.advance()
continue
l.require(",", "comma or end of line")
# Merge together statements that need to be merged together.
merged = [ ]
old = None
for new in statements:
if isinstance(old, RawParallel) and isinstance(new, RawParallel):
old.blocks.extend(new.blocks)
continue
elif isinstance(old, RawChoice) and isinstance(new, RawChoice):
old.choices.extend(new.choices)
continue
elif isinstance(old, RawChild) and isinstance(new, RawChild):
old.children.extend(new.children)
continue
elif isinstance(old, RawOn) and isinstance(new, RawOn):
old.handlers.update(new.handlers)
continue
# None is a pause statement, which gets skipped, but also
# prevents things from combining.
elif new is None:
old = new
continue
merged.append(new)
old = new
return RawBlock(block_loc, merged, animation)
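# For reference, an ATL block of the kind this parser accepts looks like the
# following in a Ren'Py script (illustrative; warpers beyond pause/instant are
# registered outside this module):
#
#     show eileen happy:
#         xalign 0.0
#         pause 1.0
#         xalign 1.0
#         repeat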
```
#### File: T PC Windows Version/renpy/curry.py
```python
from __future__ import print_function
class Curry(object):
"""
Stores a callable and some arguments. When called, calls the
callable with the stored arguments and the additional arguments
supplied to the call.
"""
def __init__(self, callable, *args, **kwargs): # @ReservedAssignment
self.callable = callable
self.args = args
self.kwargs = kwargs
self.__doc__ = getattr(self.callable, "__doc__", None)
def __call__(self, *args, **kwargs):
return self.callable(*(self.args + args),
**dict(self.kwargs.items() + kwargs.items()))
def __repr__(self):
return "<curry %s %r %r>" % (self.callable, self.args, self.kwargs)
def __eq__(self, other):
return (
isinstance(other, Curry) and
self.callable == other.callable and
self.args == other.args and
self.kwargs == other.kwargs)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.callable) ^ hash(self.args) ^ hash(self.kwargs)
def curry(fn):
"""
Takes a callable, and returns something that, when called, returns
something that when called again, calls the function. So
basically, the thing returned from here when called twice does the
same thing as the function called once.
"""
rv = Curry(Curry, fn)
rv.__doc__ = getattr(fn, "__doc__", None)
return rv
def partial(function, *args, **kwargs):
"""
Stores the arguments and keyword arguments of function, and
returns something that, when called, calls the function with
a combination of the supplied arguments and the arguments of
the second call.
"""
return Curry(function, *args, **kwargs)
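# Minimal usage sketch (illustrative, not part of the original module):
#
#     add = lambda a, b, c: a + b + c
#     partial(add, 1, 2)(3)    # -> 6
#     curry(add)(1, 2)(3)      # -> 6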
```
#### File: renpy/display/dragdrop.py
```python
from __future__ import print_function
import renpy.display
from renpy.display.render import render, Render, redraw
from renpy.display.core import absolute
from renpy.display.behavior import map_event, run, run_unhovered
import pygame_sdl2 as pygame
def default_drag_group():
"""
Gets the default drag group. If it doesn't exist yet, creates it.
"""
sls = renpy.game.context().scene_lists
rv = sls.drag_group
if rv is None:
rv = DragGroup()
sls.drag_group = rv
return rv
def default_drag_joined(drag):
return [ (drag, 0, 0) ]
def default_drop_allowable(drop, drags):
return True
class Drag(renpy.display.core.Displayable, renpy.python.RevertableObject):
"""
:doc: drag_drop class
:args: (d=None, drag_name=None, draggable=True, droppable=True, drag_raise=True, dragged=None, dropped=None, drag_handle=(0.0, 0.0, 1.0, 1.0), drag_joined=..., clicked=None, hovered=None, unhovered=None, mouse_drop=False, **properties)
A displayable that represents an object that can be dragged around
its enclosing area. A Drag can also represent an area that
other Drags can be dropped on.
A Drag can be moved around inside its parent. Generally, its parent
should be either a :func:`Fixed` or :class:`DragGroup`.
A Drag has one child. The child's state reflects the status
of the drag and drop operation:
* ``selected_hover`` - when it is being dragged.
* ``selected_idle`` - when it can be dropped on.
* ``hover`` - when the draggable will be dragged when the mouse is
clicked.
* ``idle`` - otherwise.
The drag handle is a rectangle inside the child. The mouse must be over
a non-transparent pixel inside the drag handle for dragging or clicking
to occur.
A newly-created draggable is added to the default DragGroup. A draggable
can only be in a single DragGroup - if it's added to a second group,
it's removed from the first.
When a Drag is first rendered, if its position cannot be determined
from the DragGroup it is in, the position of its upper-left corner
is computed using the standard layout algorithm. Once that position
`d`
If present, the child of this Drag. Drags use the child style
in preference to this, if it's not None.
`drag_name`
If not None, the name of this draggable. This is available
as the `name` property of draggable objects. If a Drag
with the same name is or was in the DragGroup, the starting
position of this Drag is taken from that Draggable.
`draggable`
If true, the Drag can be dragged around the screen with
the mouse.
`droppable`
If true, other Drags can be dropped on this Drag.
`drag_raise`
If true, this Drag is raised to the top when it is dragged. If
it is joined to other Drags, all joined drags are raised.
`activated`
A callback (or list of callbacks) that is called when the mouse
is pressed down on the drag. It is called with one argument, a
list of Drags that are being dragged. The return value of this
callback is ignored.
`dragged`
A callback (or list of callbacks) that is called when the Drag
has been dragged. It is called with two arguments. The first is
a list of Drags that are being dragged. The second is either
a Drag that is being dropped onto, or None if a drop did not
occur. If the callback returns a value other than None, that
value is returned as the result of the interaction.
`dropped`
A callback (or list of callbacks) that is called when this Drag
is dropped onto. It is called with two arguments. The first
is the Drag being dropped onto. The second is a list of Drags that
are being dragged. If the callback returns a value other than None,
that value is returned as the result of the interaction.
When a dragged and dropped callback are triggered for the same
event, the dropped callback is only called if dragged returns
None.
`clicked`
A callback that is called, with no arguments, when the Drag is
clicked without being moved. A droppable can also be focused
and clicked. If the callback returns a value other than None,
that value is returned as the result of the interaction.
`alternate`
An action that is run when the Drag is right-clicked (on the
desktop) or long-pressed without moving (on mobile). It may
be necessary to increase :var:`config.longpress_duration` if
this triggers too early on mobile platforms.
`drag_handle`
A (x, y, width, height) tuple, giving the position of the drag
handle within the child. In this tuple, integers are considered
to be a literal number of pixels, while floats are relative to
the size of the child.
`drag_joined`
This is called with the current Drag as an argument. It's
expected to return a list of [ (drag, x, y) ] tuples, giving
the draggables to drag as a unit. `x` and `y` are the offsets
of the drags relative to each other, they are not relative
to the corner of this drag.
`drag_offscreen`
If true, this draggable can be moved offscreen. This can be
dangerous to use with drag_joined or drags that can change
size, as the drags can leave the screen entirely, with no
way to get them back on the screen.
`mouse_drop`
If true, the drag is dropped on the first droppable under the cursor.
If false, the default, the drag is dropped onto the droppable with
the largest degree of overlap.
`drop_allowable`
A callback that is called to determine whether this drop will allow
the current drags to be dropped onto it. It is called with two arguments.
The first is the Drag which determines its sensitivity.
The second is a list of Drags that are being dragged.
Except for `d`, all of the parameters are available as fields (with
the same name) on the Drag object. In addition, after the drag has
been rendered, the following fields become available:
`x`, `y`
The position of the Drag relative to its parent, in pixels.
`w`, `h`
The width and height of the Drag's child, in pixels.
"""
z = 0
focusable = True
drag_group = None
old_position = None
drag_offscreen = False
activated = None
alternate = None
# The time a click started, or None if a click is not in progress.
click_time = None
def __init__(self,
d=None,
drag_name=None,
draggable=True,
droppable=True,
drag_raise=True,
dragged=None,
dropped=None,
drop_allowable=default_drop_allowable,
drag_handle=(0.0, 0.0, 1.0, 1.0),
drag_joined=default_drag_joined,
clicked=None,
hovered=None,
unhovered=None,
replaces=None,
drag_offscreen=False,
mouse_drop=False,
activated=None,
alternate=None,
style="drag",
**properties):
super(Drag, self).__init__(style=style, **properties)
self.drag_name = drag_name
self.draggable = draggable
self.droppable = droppable
self.drag_raise = drag_raise
self.dragged = dragged
self.dropped = dropped
self.drop_allowable = drop_allowable
self.drag_handle = drag_handle
self.drag_joined = drag_joined
self.clicked = clicked
self.hovered = hovered
self.unhovered = unhovered
self.activated = activated
self.alternate = alternate
self.drag_offscreen = drag_offscreen
# If mouse_drop is true (default False), the drop target is chosen by
# mouse position instead of by the droppable with the greatest overlap.
self.mouse_drop = mouse_drop
# We're focusable if we can be dragged.
self.focusable = draggable
self.child = None
# Add us to a drag group on creation.
if drag_name:
self.drag_group = default_drag_group()
# The current x and y coordinates of this displayable.
self.x = None
self.y = None
# The width and height of the child.
self.w = None
self.h = None
self.old_position = None
# The width and height of our parent.
self.parent_width = None
self.parent_height = None
# The target x and y coordinates of this displayable. (The
# coordinates that we're snapping to.)
self.target_x = None
self.target_y = None
# The offset from the location of the mouse to the "grab point",
# which is where the things that are being moved are offset from.
self.grab_x = None
self.grab_y = None
# x and y from the last time we rendered.
self.last_x = None
self.last_y = None
# The abs_x and abs_y from when we started the grab.
self.start_x = 0
self.start_y = 0
# The last time we were shown, using the animation timebases.
self.at = 0
# The (animation timebase) time at which we should reach
# the target coordinates for the currently executing snap animation.
self.target_at = 0
# The duration of a new snap animation to execute starting at
# the next render() call
self.target_at_delay = 0
# The displayable we were last dropping on.
self.last_drop = None
# Did we move over the course of this drag?
self.drag_moved = False
# A z index that's changed when something is raised or lowered.
self.z = 0
if replaces is not None:
self.x = replaces.x
self.y = replaces.y
self.at = replaces.at
self.target_x = replaces.target_x
self.target_y = replaces.target_y
self.target_at = replaces.target_at
self.target_at_delay = replaces.target_at_delay
self.grab_x = replaces.grab_x
self.grab_y = replaces.grab_y
self.last_x = replaces.last_x
self.last_y = replaces.last_y
self.old_position = replaces.old_position
self.drag_moved = replaces.drag_moved
self.last_drop = replaces.last_drop
self.mouse_drop = replaces.mouse_drop
self.click_time = replaces.click_time
self.z = replaces.z
if d is not None:
self.add(d)
def snap(self, x, y, delay=0):
"""
:doc: drag_drop method
Changes the position of the drag. If the drag is not showing,
then the position change is instantaneous. Otherwise, the
position change takes `delay` seconds, and is animated as a
linear move.
"""
if type(x) is float:
x = int(x * self.parent_width)
if type(y) is float:
y = int(y * self.parent_height)
self.target_x = x
self.target_y = y
if self.x is not None:
self.target_at_delay = delay
else:
self.target_at = self.at
self.x = x
self.y = y
if self.drag_group is not None:
self.drag_group.positions[self.drag_name] = (x, y, self.old_position)
redraw(self, 0)
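# Hedged worked example (hypothetical values): drag.snap(0.5, 0.5, delay=0.2)
# converts the floats to pixel coordinates (half of the parent's width and
# height) and animates a linear move there over 0.2 seconds; a delay of 0, or
# a drag that is not yet showing, makes the move immediate.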
def set_style_prefix(self, prefix, root):
if root:
super(Drag, self).set_style_prefix(prefix, root)
if self.child is not None:
self.child.set_style_prefix(prefix, False)
def add(self, d):
if self.child is not None:
raise Exception("Drag expects either zero or one children.")
self.child = renpy.easy.displayable(d)
def _clear(self):
self.child = None
renpy.display.render.redraw(self, 0)
def set_child(self, d):
"""
:doc: drag_drop method
Changes the child of this drag to `d`.
"""
d.per_interact()
self.child = renpy.easy.displayable(d)
renpy.display.render.invalidate(self)
def top(self):
"""
:doc: drag_drop method
Raises this displayable to the top of its drag_group.
"""
if self.drag_group is not None:
self.drag_group.raise_children([ self ])
def bottom(self):
"""
:doc: drag_drop method
Lowers this displayable to the bottom of its drag_group.
"""
if self.drag_group is not None:
self.drag_group.lower_children([ self ])
def visit(self):
return [ self.child ]
def focus(self, default=False):
super(Drag, self).focus(default)
rv = None
if not default:
rv = run(self.hovered)
return rv
def unfocus(self, default=False):
super(Drag, self).unfocus(default)
if not default:
run_unhovered(self.hovered)
run(self.unhovered)
def render(self, width, height, st, at):
child = self.style.child
if child is None:
child = self.child
self.parent_width = renpy.display.render.render_width
self.parent_height = renpy.display.render.render_height
cr = render(child, width, height, st, at)
cw, ch = cr.get_size()
rv = Render(cw, ch)
rv.blit(cr, (0, 0))
self.w = cw
self.h = ch
position = (self.style.xpos, self.style.ypos, self.style.xanchor, self.style.yanchor, self.style.xoffset, self.style.yoffset)
# If we don't have a position, then look for it in a drag group.
if (self.x is None) and (self.drag_group is not None) and (self.drag_name is not None):
if self.drag_name in self.drag_group.positions:
dgp = self.drag_group.positions[self.drag_name]
if len(dgp) == 3:
self.x, self.y, self.old_position = dgp
else:
self.x, self.y = dgp
self.old_position = position
if self.old_position != position:
place = True
elif self.x is None:
place = True
else:
place = False
# If we don't have a position, run the placement code and use
# that to compute our placement.
if place:
# This is required to get get_placement to work properly.
self.x = None
place_x, place_y = self.place(None, 0, 0, width, height, rv)
self.x = int(place_x)
self.y = int(place_y)
self.target_x = None
self.old_position = position
if self.target_x is None:
self.target_x = self.x
self.target_y = self.y
self.target_at = at
# Determine if we need to do the snap animation.
if self.target_at_delay:
# Snap starts now
self.target_at = at + self.target_at_delay
self.target_at_delay = 0
redraw(self, 0)
elif at >= self.target_at:
# Snap complete
self.x = self.target_x
self.y = self.target_y
else:
# Snap in progress
done = (at - self.at) / (self.target_at - self.at)
self.x = absolute(self.x + done * (self.target_x - self.x))
self.y = absolute(self.y + done * (self.target_y - self.y))
redraw(self, 0)
if self.draggable or self.clicked is not None:
fx, fy, fw, fh = self.drag_handle
if isinstance(fx, float):
fx = int(fx * cw)
if isinstance(fy, float):
fy = int(fy * ch)
if isinstance(fw, float):
fw = int(fw * cw)
if isinstance(fh, float):
fh = int(fh * ch)
mask = self.style.focus_mask
if mask is True:
mask = cr.subsurface((fx, fy, fw, fh))
elif mask is not None:
try:
mask = renpy.display.render.render(mask, fw, fh, st, at)
except:
if callable(mask):
mask = mask
else:
raise Exception("Focus_mask must be None, True, a displayable, or a callable.")
if mask is not None:
fmx = 0
fmy = 0
else:
fmx = None
fmy = None
rv.add_focus(self, None, fx, fy, fw, fh, fmx, fmy, mask)
self.last_x = self.x
self.last_y = self.y
self.at = at
return rv
def event(self, ev, x, y, st):
if not self.is_focused():
return self.child.event(ev, x, y, st)
# Mouse, in parent-relative coordinates.
par_x = int(self.last_x + x)
par_y = int(self.last_y + y)
grabbed = (renpy.display.focus.get_grab() is self)
if (self.alternate is not None) and renpy.display.touch and map_event(ev, "drag_activate"):
self.click_time = st
renpy.game.interface.timeout(renpy.config.longpress_duration)
if grabbed:
joined_offsets = self.drag_joined(self)
joined = [ i[0] for i in joined_offsets ]
elif self.draggable and map_event(ev, "drag_activate"):
joined_offsets = self.drag_joined(self)
joined = [ i[0] for i in joined_offsets ]
if not joined:
raise renpy.display.core.IgnoreEvent()
renpy.display.focus.set_grab(self)
run(joined[0].activated, joined)
self.grab_x = x
self.grab_y = y
# If we're not the only thing we're joined with, we
# might need to adjust our grab point.
for i, xo, yo in joined_offsets:
if i is self:
self.grab_x += xo
self.grab_y += yo
break
self.drag_moved = False
self.start_x = par_x
self.start_y = par_y
grabbed = True
elif (self.alternate is not None) and map_event(ev, "button_alternate"):
rv = run(self.alternate)
if rv is not None:
return rv
raise renpy.display.core.IgnoreEvent()
if ((self.alternate is not None) and
renpy.display.touch and
(self.click_time is not None) and
((st - self.click_time) > renpy.config.longpress_duration)):
self.click_time = None
rv = run(self.alternate)
if rv is not None:
return rv
# Handle clicking on droppables.
if not grabbed:
if self.clicked is not None and map_event(ev, "drag_deactivate"):
self.click_time = None
rv = run(self.clicked)
if rv is not None:
return rv
raise renpy.display.core.IgnoreEvent()
return self.child.event(ev, x, y, st)
# Handle moves by moving things relative to the grab point.
if ev.type in (pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN):
handled = True
if not self.drag_moved and (self.start_x != par_x or self.start_y != par_y):
self.drag_moved = True
self.click_time = None
# We may not be in the drag_joined group.
self.set_style_prefix("idle_", True)
# Set the style.
for i in joined:
i.set_style_prefix("selected_hover_", True)
# Raise the joined items.
if self.drag_raise and self.drag_group is not None:
self.drag_group.raise_children(joined)
if self.drag_moved:
for i, xo, yo in joined_offsets:
new_x = int(par_x - self.grab_x + xo)
new_y = int(par_y - self.grab_y + yo)
if not self.drag_offscreen:
new_x = max(new_x, 0)
new_x = min(new_x, int(i.parent_width - i.w))
new_y = max(new_y, 0)
new_y = min(new_y, int(i.parent_height - i.h))
if i.drag_group is not None and i.drag_name is not None:
i.drag_group.positions[i.drag_name] = (new_x, new_y, self.old_position)
i.x = new_x
i.y = new_y
i.target_x = new_x
i.target_y = new_y
i.target_at = self.at
redraw(i, 0)
else:
handled = False
if (self.drag_group is not None) and self.drag_moved:
if self.mouse_drop:
drop = self.drag_group.get_drop_at(joined, par_x, par_y)
else:
drop = self.drag_group.get_best_drop(joined)
else:
drop = None
if drop is not self.last_drop:
if self.last_drop is not None:
self.last_drop.set_style_prefix("idle_", True)
if drop is not None:
drop.set_style_prefix("selected_idle_", True)
self.last_drop = drop
if map_event(ev, 'drag_deactivate'):
self.click_time = None
renpy.display.focus.set_grab(None)
if drop is not None:
drop.set_style_prefix("idle_", True)
for i in joined:
i.set_style_prefix("idle_", True)
self.set_style_prefix("hover_", True)
self.grab_x = None
self.grab_y = None
self.last_drop = None
if self.drag_moved:
# Call the drag callback.
drag = joined[0]
if drag.dragged is not None:
rv = run(drag.dragged, joined, drop)
if rv is not None:
return rv
# Call the drop callback.
if drop is not None and drop.dropped is not None:
rv = run(drop.dropped, drop, joined)
if rv is not None:
return rv
else:
# Call the clicked callback.
if self.clicked:
rv = run(self.clicked)
if rv is not None:
return rv
if handled:
raise renpy.display.core.IgnoreEvent()
def get_placement(self):
if self.x is not None:
return self.x, self.y, 0, 0, 0, 0, True
else:
return super(Drag, self).get_placement()
def per_interact(self):
self.set_style_prefix("idle_", True)
super(Drag, self).per_interact()
class DragGroup(renpy.display.layout.MultiBox):
"""
:doc: drag_drop class
Represents a group of Drags. A Drag is limited to the boundary of
its DragGroup. Dropping only works between Drags that are in the
same DragGroup. Drags may only be raised when they are inside a
DragGroup.
A DragGroup is laid out like a :func:`Fixed`.
All positional parameters to the DragGroup constructor should be
Drags, which are added to the DragGroup.
`min_overlap`
An integer giving the minimum number of pixels of overlap
required for a drop to be allowed.
"""
z_serial = 0
sorted = False
_list_type = renpy.python.RevertableList
def __unicode__(self):
return "DragGroup"
def __init__(self, *children, **properties):
properties.setdefault("style", "fixed")
properties.setdefault("layout", "fixed")
replaces = properties.pop("replaces", None)
min_overlap = properties.pop("min_overlap", 0)
self.min_overlap = min_overlap
super(DragGroup, self).__init__(**properties)
self.sorted = False
if replaces is not None:
self.positions = renpy.python.RevertableDict(replaces.positions)
self.sensitive = replaces.sensitive
self.z_serial = replaces.z_serial
else:
self.positions = renpy.python.RevertableDict()
self.sensitive = True
self.z_serial = 0
for i in children:
self.add(i)
def add(self, child):
"""
:doc: drag_drop method
Adds `child`, which must be a Drag, to this DragGroup.
"""
if not isinstance(child, Drag):
raise Exception("Only drags can be added to a drag group.")
child.drag_group = self
super(DragGroup, self).add(child)
self.sorted = False
def remove(self, child):
"""
:doc: drag_drop method
Removes `child` from this DragGroup.
"""
if not isinstance(child, Drag):
raise Exception("Only drags can be removed from a drag group.")
child.x = None
super(DragGroup, self).remove(child)
def render(self, width, height, st, at):
if not self.sorted:
self.children.sort(key=lambda i : i.z)
self.sorted = True
return super(DragGroup, self).render(width, height, st, at)
def event(self, ev, x, y, st):
if not self.sensitive:
return None
return super(DragGroup, self).event(ev, x, y, st)
def raise_children(self, l):
"""
Raises the children in `l` to the top of this drag_group, using the
order given in l for those children.
"""
self.sorted = False
for i in l:
self.z_serial += 1
i.z = self.z_serial
renpy.display.render.redraw(self, 0)
def lower_children(self, l):
"""
Lowers the children in `l` to the bottom of this drag group, with
the one at the bottom being the lowest.
"""
self.sorted = False
for i in l:
self.z_serial += 1
i.z = -self.z_serial
renpy.display.render.redraw(self, 0)
def get_best_drop(self, joined):
"""
Returns the droppable that the members of joined overlap the most.
"""
max_overlap = 0
rv = 0
joined_set = set(joined)
for d in joined:
r1 = (d.x, d.y, d.w, d.h)
for c in self.children:
if c in joined_set:
continue
if not c.droppable:
continue
if c.x is None:
continue
r2 = (c.x, c.y, c.w, c.h)
overlap = rect_overlap_area(r1, r2)
if (
overlap >= max_overlap and
overlap >= self.min_overlap and
c.drop_allowable(c, joined)
):
rv = c
max_overlap = overlap
if max_overlap <= 0:
return None
else:
return rv
def get_drop_at(self, joined, x, y):
"""
Returns the droppable that is exactly at x, y.
"""
joined_set = set(joined)
for c in self.children:
if c in joined_set:
continue
if not c.droppable:
continue
if c.x is None:
continue
if (
x >= c.x and y >= c.y and
x < (c.x + c.w) and y < (c.y + c.h) and
c.drop_allowable(c, joined)
):
return c
def get_children(self):
"""
Returns a list of Drags that are the children of
this DragGroup.
"""
return renpy.python.RevertableList(self.children)
def get_child_by_name(self, name):
"""
:doc: drag_drop method
Returns the first child of this DragGroup that has a drag_name
of name.
"""
for i in self.children:
if i.drag_name == name:
return i
return None
def rect_overlap_area(r1, r2):
"""
Returns the number of pixels by which rectangles r1 and r2 overlap.
"""
x1, y1, w1, h1 = r1
x2, y2, w2, h2 = r2
maxleft = max(x1, x2)
minright = min(x1 + w1, x2 + w2)
maxtop = max(y1, y2)
minbottom = min(y1 + h1, y2 + h2)
if minright < maxleft:
return 0
if minbottom < maxtop:
return 0
return (minright - maxleft) * (minbottom - maxtop)
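# Worked example (hypothetical numbers): r1 = (0, 0, 100, 100) and
# r2 = (50, 50, 100, 100) share a 50x50 region, so rect_overlap_area(r1, r2)
# returns 2500; rectangles that do not touch return 0.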
```
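The Drag and DragGroup classes above form a small drag-and-drop API: a DragGroup lays its Drag children out like a Fixed, and the `dragged`, `dropped` and `clicked` callbacks described in the Drag docstring drive the interaction. Below is a minimal, hedged sketch of wiring them together from Python (for example from an init python block); the image names and callback bodies are hypothetical and only illustrate the call signatures used by the event handler above.

```python
# Hypothetical usage sketch: two draggable cards and one fixed drop slot.
def card_dragged(drags, drop):
    # `drags` is the list of Drag objects being moved; `drop` is the Drag they
    # were released over, or None if no droppable was hit.
    if drop is not None:
        drags[0].snap(drop.x, drop.y, delay=0.2)
    # Returning None lets the interaction continue.

def slot_dropped(drop, drags):
    # Called on the drop target; a non-None return value ends the interaction
    # and becomes its result.
    return drags[0].drag_name

group = DragGroup(
    Drag("card_a.png", drag_name="a", dragged=card_dragged),
    Drag("card_b.png", drag_name="b", dragged=card_dragged),
    Drag("slot.png", drag_name="slot", draggable=False, dropped=slot_dropped),
)
```

In a game this group would normally be added to a screen so that it takes part in an interaction; the sketch only shows how the constructor arguments and the callback signatures fit together.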
#### File: renpy/display/joystick.py
```python
from __future__ import print_function
import renpy.display
import pygame_sdl2
# Do we have a joystick enabled?
enabled = False
class JoyBehavior(renpy.display.layout.Null):
"""
This is a behavior intended for joystick calibration. If a joystick
event occurs, this returns it as a string.
"""
pass
joysticks = { }
def count():
return pygame_sdl2.joystick.get_count()
def get(n):
if n in joysticks:
return joysticks[n]
try:
joysticks[n] = pygame_sdl2.joystick.Joystick(n)
return joysticks[n]
except:
return None
```
#### File: renpy/display/movetransition.py
```python
from __future__ import print_function
import renpy.display
# Utility function used by MoveTransition et al.
def position(d):
xpos, ypos, xanchor, yanchor, _xoffset, _yoffset, _subpixel = d.get_placement()
if xpos is None:
xpos = 0
if ypos is None:
ypos = 0
if xanchor is None:
xanchor = 0
if yanchor is None:
yanchor = 0
return xpos, ypos, xanchor, yanchor
def offsets(d):
_xpos, _ypos, _xanchor, _yanchor, xoffset, yoffset, _subpixel = d.get_placement()
if renpy.config.movetransition_respects_offsets:
return { 'xoffset' : xoffset, 'yoffset' : yoffset }
else:
return { }
# These are used by MoveTransition.
def MoveFactory(pos1, pos2, delay, d, **kwargs):
if pos1 == pos2:
return d
return renpy.display.motion.Move(pos1, pos2, delay, d, **kwargs)
def default_enter_factory(pos, delay, d, **kwargs):
return d
def default_leave_factory(pos, delay, d, **kwargs):
return None
# These can be used to move things in and out of the screen.
def MoveIn(pos, pos1, delay, d, **kwargs):
def aorb(a, b):
if a is None:
return b
return a
pos = tuple([aorb(a, b) for a, b in zip(pos, pos1)])
return renpy.display.motion.Move(pos, pos1, delay, d, **kwargs)
def MoveOut(pos, pos1, delay, d, **kwargs):
def aorb(a, b):
if a is None:
return b
return a
pos = tuple([aorb(a, b) for a, b in zip(pos, pos1)])
return renpy.display.motion.Move(pos1, pos, delay, d, **kwargs)
def ZoomInOut(start, end, pos, delay, d, **kwargs):
xpos, ypos, xanchor, yanchor = pos
FactorZoom = renpy.display.motion.FactorZoom
if end == 1.0:
return FactorZoom(start, end, delay, d, after_child=d, opaque=False,
xpos=xpos, ypos=ypos, xanchor=xanchor, yanchor=yanchor, **kwargs)
else:
return FactorZoom(start, end, delay, d, opaque=False,
xpos=xpos, ypos=ypos, xanchor=xanchor, yanchor=yanchor, **kwargs)
def RevolveInOut(start, end, pos, delay, d, **kwargs):
return renpy.display.motion.Revolve(start, end, delay, d, pos=pos, **kwargs)
def OldMoveTransition(delay, old_widget=None, new_widget=None, factory=None, enter_factory=None, leave_factory=None, old=False, layers=[ 'master' ]):
"""
Returns a transition that attempts to find images that have changed
position, and moves them from the old position to the new position, taking
delay seconds to complete the move.
If `factory` is given, it is expected to be a function that takes as
arguments: an old position, a new position, the delay, and a
displayable, and to return a displayable. If not
given, the default behavior is to move the displayable from the
starting to the ending positions. Positions are always given as
(xpos, ypos, xanchor, yanchor) tuples.
If `enter_factory` or `leave_factory` are given, they are expected
to be functions that take as arguments a position, a delay, and a
displayable, and return a displayable. They are applied to
displayables that are entering or leaving the scene,
respectively. The default is to show in place displayables that
are entering, and not to show those that are leaving.
If `old` is True, then factory moves the old displayable with the
given tag. Otherwise, it moves the new displayable with that
tag.
`layers` is a list of layers that the transition will be applied
to.
Images are considered to be the same if they have the same tag, in
the same way that the tag is used to determine which image to
replace or to hide. They are also considered to be the same if
they have no tag, but use the same displayable.
Computing the order in which images are displayed is a three-step
process. The first step is to create a list of images that
preserves the relative ordering of entering and moving images. The
second step is to insert the leaving images such that each leaving
image is at the lowest position that is still above all images
that were below it in the original scene. Finally, the list
is sorted by zorder, to ensure no zorder violations occur.
If you use this transition to slide an image off the side of the
screen, remember to hide it when you are done. (Or just use
a leave_factory.)
"""
if factory is None:
factory = MoveFactory
if enter_factory is None:
enter_factory = default_enter_factory
if leave_factory is None:
leave_factory = default_leave_factory
use_old = old
def merge_slide(old, new):
# If new does not have .layers or .scene_list, then we simply
# insert a move from the old position to the new position, if
# a move occurred.
if (not isinstance(new, renpy.display.layout.MultiBox)
or (new.layers is None and new.layer_name is None)):
if use_old:
child = old
else:
child = new
old_pos = position(old)
new_pos = position(new)
if old_pos != new_pos:
return factory(old_pos,
new_pos,
delay,
child,
**offsets(child)
)
else:
return child
# If we're in the layers_root widget, merge the child widgets
# for each layer.
if new.layers:
rv = renpy.display.layout.MultiBox(layout='fixed')
rv.layers = { }
for layer in renpy.config.layers:
f = new.layers[layer]
if (isinstance(f, renpy.display.layout.MultiBox)
and layer in layers
and f.scene_list is not None):
f = merge_slide(old.layers[layer], new.layers[layer])
rv.layers[layer] = f
rv.add(f)
return rv
# Otherwise, we recompute the scene list for the two widgets, merging
# as appropriate.
# Wraps the displayable found in SLE so that the various timebases
# are maintained.
def wrap(sle):
return renpy.display.layout.AdjustTimes(sle.displayable, sle.show_time, sle.animation_time)
def tag(sle):
return sle.tag or sle.displayable
def merge(sle, d):
rv = sle.copy()
rv.show_time = 0
rv.displayable = d
return rv
def entering(sle):
new_d = wrap(new_sle)
move = enter_factory(position(new_d), delay, new_d, **offsets(new_d))
if move is None:
return
rv_sl.append(merge(new_sle, move))
def leaving(sle):
old_d = wrap(sle)
move = leave_factory(position(old_d), delay, old_d, **offsets(old_d))
if move is None:
return
move = renpy.display.layout.IgnoresEvents(move)
rv_sl.append(merge(old_sle, move))
def moving(old_sle, new_sle):
old_d = wrap(old_sle)
new_d = wrap(new_sle)
if use_old:
child = old_d
else:
child = new_d
move = factory(position(old_d), position(new_d), delay, child, **offsets(child))
if move is None:
return
rv_sl.append(merge(new_sle, move))
# The old, new, and merged scene_lists.
old_sl = old.scene_list[:]
new_sl = new.scene_list[:]
rv_sl = [ ]
# A list of tags in old_sl, new_sl, and rv_sl.
old_map = dict((tag(i), i) for i in old_sl if i is not None)
new_tags = set(tag(i) for i in new_sl if i is not None)
rv_tags = set()
while old_sl or new_sl:
# If we have something in old_sl, then
if old_sl:
old_sle = old_sl[0]
old_tag = tag(old_sle)
# If the old thing has already moved, then remove it.
if old_tag in rv_tags:
old_sl.pop(0)
continue
# If the old thing does not match anything in new_tags,
# have it leave.
if old_tag not in new_tags:
leaving(old_sle)
rv_tags.add(old_tag)
old_sl.pop(0)
continue
# Otherwise, we must have something in new_sl. We want to
# either move it or have it enter.
new_sle = new_sl.pop(0)
new_tag = tag(new_sle)
# If it exists in both, move.
if new_tag in old_map:
old_sle = old_map[new_tag]
moving(old_sle, new_sle)
rv_tags.add(new_tag)
continue
else:
entering(new_sle)
rv_tags.add(new_tag)
continue
# Sort everything by zorder, to ensure that there are no zorder
# violations in the result.
rv_sl.sort(key=lambda a : a.zorder)
layer = new.layer_name
rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **renpy.game.interface.layer_properties[layer])
rv.append_scene_list(rv_sl)
rv.layer_name = layer
return rv
# This calls merge_slide to actually do the merging.
rv = merge_slide(old_widget, new_widget)
rv.delay = delay # W0201
return rv
##############################################################################
# New Move Transition (since 6.14)
class MoveInterpolate(renpy.display.core.Displayable):
"""
This displayable has two children. It interpolates between their
positions to decide where the displayed child is placed on the screen.
"""
def __init__(self, delay, old, new, use_old, time_warp):
super(MoveInterpolate, self).__init__()
# The old and new displayables.
self.old = old
self.new = new
# Should we display the old displayable?
self.use_old = use_old
# Time warp function or None.
self.time_warp = time_warp
# The width of the screen.
self.screen_width = 0
self.screen_height = 0
# The width of the selected child.
self.child_width = 0
self.child_height = 0
# The delay and st.
self.delay = delay
self.st = 0
def render(self, width, height, st, at):
self.screen_width = width
self.screen_height = height
old_r = renpy.display.render.render(self.old, width, height, st, at)
new_r = renpy.display.render.render(self.new, width, height, st, at)
if self.use_old:
cr = old_r
else:
cr = new_r
self.child_width, self.child_height = cr.get_size()
self.st = st
if self.st < self.delay:
renpy.display.render.redraw(self, 0)
return cr
def child_placement(self, child):
def based(v, base):
if v is None:
return 0
elif isinstance(v, int):
return v
elif isinstance(v, renpy.display.core.absolute):
return v
else:
return v * base
xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = child.get_placement()
xpos = based(xpos, self.screen_width)
ypos = based(ypos, self.screen_height)
xanchor = based(xanchor, self.child_width)
yanchor = based(yanchor, self.child_height)
return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel
def get_placement(self):
if self.st > self.delay:
done = 1.0
else:
done = self.st / self.delay
if self.time_warp is not None:
done = self.time_warp(done)
absolute = renpy.display.core.absolute
def I(a, b):
return absolute(a + done * (b - a))
old_xpos, old_ypos, old_xanchor, old_yanchor, old_xoffset, old_yoffset, old_subpixel = self.child_placement(self.old)
new_xpos, new_ypos, new_xanchor, new_yanchor, new_xoffset, new_yoffset, new_subpixel = self.child_placement(self.new)
xpos = I(old_xpos, new_xpos)
ypos = I(old_ypos, new_ypos)
xanchor = I(old_xanchor, new_xanchor)
yanchor = I(old_yanchor, new_yanchor)
xoffset = I(old_xoffset, new_xoffset)
yoffset = I(old_yoffset, new_yoffset)
subpixel = old_subpixel or new_subpixel
return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel
def MoveTransition(delay, old_widget=None, new_widget=None, enter=None, leave=None, old=False, layers=[ 'master' ], time_warp=None, enter_time_warp=None, leave_time_warp=None):
"""
:doc: transition function
:args: (delay, enter=None, leave=None, old=False, layers=['master'], time_warp=None, enter_time_warp=None, leave_time_warp=None)
:name: MoveTransition
Returns a transition that interpolates the position of images (with the
same tag) in the old and new scenes.
`delay`
The time it takes for the interpolation to finish.
`enter`
If not None, images entering the scene will also be moved. The value
of `enter` should be a transform that is applied to the image to
get its starting position.
`leave`
If not None, images leaving the scene will also be moved. The value
of `leave` should be a transform that is applied to the image to
get its ending position.
`old`
If true, the old image will be used in preference to the new one.
`layers`
A list of layers that moves are applied to.
`time_warp`
A time warp function that's applied to the interpolation. This
takes a number between 0.0 and 1.0, and should return a number in
the same range.
`enter_time_warp`
A time warp function that's applied to images entering the scene.
`leave_time_warp`
A time warp function that's applied to images leaving the scene.
"""
use_old = old
def merge_slide(old, new, merge_slide):
# This function takes itself as an argument to prevent a reference
# loop that occurs when it refers to itself in its parent's
# scope.
# If new does not have .layers or .scene_list, then we simply
# insert a move from the old position to the new position, if
# a move occurred.
if (not isinstance(new, renpy.display.layout.MultiBox)
or (new.layers is None and new.layer_name is None)):
if old is new:
return new
else:
return MoveInterpolate(delay, old, new, use_old, time_warp)
# If we're in the layers_root widget, merge the child widgets
# for each layer.
if new.layers:
rv = renpy.display.layout.MultiBox(layout='fixed')
rv.layers = { }
for layer in renpy.config.layers:
f = new.layers[layer]
if (isinstance(f, renpy.display.layout.MultiBox)
and layer in layers
and f.scene_list is not None):
f = merge_slide(old.layers[layer], new.layers[layer], merge_slide)
rv.layers[layer] = f
rv.add(f)
return rv
# Otherwise, we recompute the scene list for the two widgets, merging
# as appropriate.
# Wraps the displayable found in SLE so that the various timebases
# are maintained.
def wrap(sle):
return renpy.display.layout.AdjustTimes(sle.displayable, sle.show_time, sle.animation_time)
def tag(sle):
return sle.tag or sle.displayable
def merge(sle, d):
rv = sle.copy()
rv.show_time = 0
rv.displayable = d
return rv
def entering(sle):
if not enter:
return
new_d = wrap(new_sle)
move = MoveInterpolate(delay, enter(new_d), new_d, False, enter_time_warp)
rv_sl.append(merge(new_sle, move))
def leaving(sle):
if not leave:
return
old_d = wrap(sle)
move = MoveInterpolate(delay, old_d, leave(old_d), True, leave_time_warp)
move = renpy.display.layout.IgnoresEvents(move)
rv_sl.append(merge(old_sle, move))
def moving(old_sle, new_sle):
if old_sle.displayable is new_sle.displayable:
rv_sl.append(new_sle)
return
old_d = wrap(old_sle)
new_d = wrap(new_sle)
move = MoveInterpolate(delay, old_d, new_d, use_old, time_warp)
rv_sl.append(merge(new_sle, move))
# The old, new, and merged scene_lists.
old_sl = old.scene_list[:]
new_sl = new.scene_list[:]
rv_sl = [ ]
# A list of tags in old_sl, new_sl, and rv_sl.
old_map = dict((tag(i), i) for i in old_sl if i is not None)
new_tags = set(tag(i) for i in new_sl if i is not None)
rv_tags = set()
while old_sl or new_sl:
# If we have something in old_sl, then
if old_sl:
old_sle = old_sl[0]
old_tag = tag(old_sle)
# If the old thing has already moved, then remove it.
if old_tag in rv_tags:
old_sl.pop(0)
continue
# If the old thing does not match anything in new_tags,
# have it leave.
if old_tag not in new_tags:
leaving(old_sle)
rv_tags.add(old_tag)
old_sl.pop(0)
continue
# Otherwise, we must have something in new_sl. We want to
# either move it or have it enter.
new_sle = new_sl.pop(0)
new_tag = tag(new_sle)
# If it exists in both, move.
if new_tag in old_map:
old_sle = old_map[new_tag]
moving(old_sle, new_sle)
rv_tags.add(new_tag)
continue
else:
entering(new_sle)
rv_tags.add(new_tag)
continue
# Sort everything by zorder, to ensure that there are no zorder
# violations in the result.
rv_sl.sort(key=lambda a : a.zorder)
layer = new.layer_name
rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **renpy.game.interface.layer_properties[layer])
rv.append_scene_list(rv_sl)
return rv
# Call merge_slide to actually do the merging.
rv = merge_slide(old_widget, new_widget, merge_slide)
rv.delay = delay
return rv
```
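MoveTransition above is a transition factory: it interpolates the placement of images whose tags appear in both the old and the new scene, and can animate entering and leaving images through the `enter` and `leave` transforms. The sketch below shows one plausible way to parameterise it; the `offscreenleft`/`offscreenright` names and the easing function are assumptions for illustration, not defined by this module, and the sketch relies on the store-level MoveTransition binding that game scripts use, with old_widget and new_widget supplied later by the engine when the transition actually runs.

```python
# Hypothetical script-level sketch for the MoveTransition defined above.
def ease(t):
    # Smoothstep time warp: maps 0.0..1.0 onto 0.0..1.0 with soft ends.
    return t * t * (3.0 - 2.0 * t)

# offscreenleft / offscreenright stand in for transforms that place an image
# off either edge of the screen.
slide = MoveTransition(
    0.5,                   # seconds taken by the interpolation
    enter=offscreenleft,   # entering images start off screen, then move in
    leave=offscreenright,  # leaving images move out to this position
    time_warp=ease,
    enter_time_warp=ease,
    leave_time_warp=ease,
)
# A script would then use it as `with slide` after a scene or show statement.
```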
#### File: renpy/display/tts.py
```python
from __future__ import print_function
import sys
import os
import renpy.audio
import subprocess
import pygame
class TTSRoot(Exception):
"""
An exception that can be used to cause the TTS system to read the text
of the root displayable, rather than text of the currently focused
displayable.
"""
# The root of the scene.
root = None
# The text of the last displayable.
last = ""
# The speech synthesis process.
process = None
def periodic():
global process
if process is not None:
if process.poll() is not None:
process = None
def is_active():
return process is not None
def default_tts_function(s):
"""
Default function which speaks messages using an os-specific method.
"""
global process
# Stop the existing process.
if process is not None:
try:
process.terminate()
process.wait()
except:
pass
process = None
s = s.strip()
if not s:
return
if renpy.game.preferences.self_voicing == "clipboard":
try:
pygame.scrap.put(pygame.SCRAP_TEXT, s.encode("utf-8"))
except:
pass
return
if renpy.game.preferences.self_voicing == "debug":
renpy.exports.restart_interaction()
return
fsencode = renpy.exports.fsencode
if "RENPY_TTS_COMMAND" in os.environ:
process = subprocess.Popen([ os.environ["RENPY_TTS_COMMAND"], fsencode(s) ])
elif renpy.linux:
if renpy.config.tts_voice is None:
process = subprocess.Popen([ "espeak", fsencode(s) ])
else:
process = subprocess.Popen([ "espeak", "-v", fsencode(renpy.config.tts_voice), fsencode(s) ])
elif renpy.macintosh:
if renpy.config.tts_voice is None:
process = subprocess.Popen([ "say", fsencode(s) ])
else:
process = subprocess.Popen([ "say", "-v", fsencode(renpy.config.tts_voice), fsencode(s) ])
elif renpy.windows:
if renpy.config.tts_voice is None:
voice = "default voice" # something that is unlikely to match.
else:
voice = renpy.config.tts_voice
say_vbs = os.path.join(os.path.dirname(sys.executable), "say.vbs")
s = s.replace('"', "")
process = subprocess.Popen([ "wscript", fsencode(say_vbs), fsencode(s), fsencode(voice) ])
def tts(s):
"""
Speaks the queued messages using the specified function.
"""
global queue
try:
renpy.config.tts_function(s)
except:
pass
queue = [ ]
def speak(s, translate=True, force=False):
"""
This is called by the system to queue the speaking of message `s`.
"""
if not force and not renpy.game.preferences.self_voicing:
return
if translate:
s = renpy.translation.translate_string(s)
tts(s)
def set_root(d):
global root
root = d
# The old value of the self_voicing preference.
old_self_voicing = False
def displayable(d):
"""
Causes the TTS system to read the text of the displayable `d`.
"""
global old_self_voicing
global last
self_voicing = renpy.game.preferences.self_voicing
if not self_voicing:
if old_self_voicing:
old_self_voicing = self_voicing
speak(renpy.translation.translate_string("Self-voicing disabled."), force=True)
last = ""
return
prefix = ""
if not old_self_voicing:
old_self_voicing = self_voicing
if self_voicing == "clipboard":
prefix = renpy.translation.translate_string("Clipboard voicing enabled. ")
else:
prefix = renpy.translation.translate_string("Self-voicing enabled. ")
for i in renpy.config.tts_voice_channels:
if not prefix and renpy.audio.music.get_playing(i):
return
if d is None:
d = root
while True:
try:
s = d._tts_all()
break
except TTSRoot:
if d is root:
return
else:
d = root
if s != last:
last = s
tts(prefix + s)
```
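Every utterance above is funnelled through tts(), which calls renpy.config.tts_function and, by default, ends up in the OS-specific default_tts_function shown here. A hedged sketch of substituting a custom speech backend follows; the external `flite` synthesizer is only an example of a command that accepts text on its command line.

```python
import subprocess

import renpy

def my_tts_function(s):
    # Speak `s` with an external synthesizer instead of the platform default.
    s = s.strip()
    if not s:
        return
    subprocess.Popen(["flite", "-t", s])

# Install the hook; in a game this assignment would normally be made from an
# init python block. tts() above already swallows any exception it raises.
renpy.config.tts_function = my_tts_function
```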
#### File: T PC Windows Version/renpy/editor.py
```python
from __future__ import print_function
import os
import renpy
import traceback
import subprocess
class Editor(object):
"""
This class is intended to be subclassed by editor subclasses. It provides a
number of editor related operations, which are called by Ren'Py (including
the Ren'Py Launcher).
Editor operations are grouped into transactions. An editor transaction
starts with a call to the begin() method. Ren'Py will then call some number
of command methods, each causing an operation to occur in the editor. Ren'Py
will call end() at the end of the transaction.
Although not required, it's reasonable that an implementation of this class
will batch the files together and send them to the editor at once. It's also
reasonable that an implementation will send the operations one at a time (and
do little-to-nothing in begin() and end()).
Each operation takes a path to operate on. If the editor has a buffer
corresponding to that path, that buffer is used. Otherwise, the editor
is implicitly opened.
We reserve the right to add new keyword arguments to methods of this class,
so please ensure that subclasses accept and ignore unknown keyword
arguments.
"""
def begin(self, new_window=False, **kwargs):
"""
Begins an editor transaction.
`new_window`
If True, a new editor window will be created and presented to the
user. Otherwise, an existing editor window will be used.
"""
def end(self, **kwargs):
"""
Ends an editor transaction.
"""
def open(self, filename, line=None, **kwargs): # @ReservedAssignment
"""
Ensures `filename` is open in the editor. This may be called multiple
times per transaction.
`line`
If not None, this should be a line number to open in the
editor.
The first open call in a transaction is somewhat special - that file
should be given focus in a tabbed editor environment.
"""
# This should be set to True if the editor supports projects.
has_projects = False
def open_project(self, directory):
"""
Opens `directory` as a project in the editor.
"""
class SystemEditor(Editor):
def open(self, filename, line=None, **kwargs): # @ReservedAssignment
filename = renpy.exports.fsencode(filename)
try:
if renpy.windows:
os.startfile(filename) # @UndefinedVariable
elif renpy.macintosh:
subprocess.call([ "open", filename ]) # @UndefinedVariable
elif renpy.linux:
subprocess.call([ "xdg-open", filename ]) # @UndefinedVariable
except:
traceback.print_exc()
# The editor that Ren'Py is using. It should be a subclass of the Editor
# class.
editor = None
def init():
"""
Creates the editor object, based on the contents of the RENPY_EDIT_PY
file.
"""
global editor
editor = SystemEditor()
path = os.environ.get("RENPY_EDIT_PY", None)
if path is None:
return
with open(path, "r") as f:
source = f.read()
code = compile(source, path, "exec")
scope = { "__file__" : path }
exec code in scope, scope
if "Editor" in scope:
editor = scope["Editor"]()
return
raise Exception("{0} did not define an Editor class.".format(path))
def launch_editor(filenames, line=1, transient=False):
"""
Causes the editor to be launched.
"""
# On mobile devices, we will never be able to launch the editor.
if renpy.mobile:
return True
if editor is None:
init()
if editor is None:
return False
filenames = [ renpy.parser.unelide_filename(i) for i in filenames ]
try:
editor.begin(new_window=transient)
for i in filenames:
editor.open(i, line)
line = None # The line number only applies to the first filename.
editor.end()
return True
except:
traceback.print_exc()
return False
```
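init() above executes the file named by the RENPY_EDIT_PY environment variable and instantiates the Editor class it defines, so integrating a new editor means writing one small module. The sketch below batches the open() calls of a transaction into a single command, as the base-class docstring suggests; it assumes the Visual Studio Code `code` launcher is available on the PATH.

```python
# Hypothetical RENPY_EDIT_PY file driving an external editor through its CLI.
import subprocess

import renpy.editor

class Editor(renpy.editor.Editor):

    def begin(self, new_window=False, **kwargs):
        self.args = ["--new-window"] if new_window else []

    def open(self, filename, line=None, **kwargs):
        if line is not None:
            self.args.extend(["--goto", "{}:{}".format(filename, line)])
        else:
            self.args.append(filename)

    def end(self, **kwargs):
        # Send the whole transaction to the editor at once.
        subprocess.Popen(["code"] + self.args)
```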
#### File: T PC Windows Version/renpy/main.py
```python
from __future__ import print_function
import renpy.display
import renpy.style
import renpy.sl2
import renpy.test
import renpy.game as game
import os
import sys
import time
import zipfile
import gc
import __main__
last_clock = time.time()
def log_clock(s):
global last_clock
now = time.time()
s = "{} took {:.2f}s".format(s, now - last_clock)
renpy.display.log.write(s)
if renpy.android and not renpy.config.log_to_stdout:
print(s)
last_clock = now
def reset_clock():
global last_clock
last_clock = time.time()
def run(restart):
"""
This is called during a single run of the script. Restarting the script
will cause this to change.
"""
reset_clock()
# Reset the store to a clean version of itself.
renpy.python.clean_stores()
log_clock("Cleaning stores")
# Init translation.
renpy.translation.init_translation()
log_clock("Init translation")
# Rebuild the various style caches.
renpy.style.build_styles() # @UndefinedVariable
log_clock("Build styles")
renpy.sl2.slast.load_cache()
log_clock("Load screen analysis")
# Analyze the screens.
renpy.display.screen.analyze_screens()
log_clock("Analyze screens")
if not restart:
renpy.sl2.slast.save_cache()
log_clock("Save screen analysis")
# Prepare the screens.
renpy.display.screen.prepare_screens()
log_clock("Prepare screens")
if not restart:
renpy.pyanalysis.save_cache()
log_clock("Save pyanalysis.")
renpy.game.script.save_bytecode()
log_clock("Save bytecode.")
# Handle arguments and commands.
if not renpy.arguments.post_init():
renpy.exports.quit()
if renpy.config.clear_lines:
renpy.scriptedit.lines.clear()
# Sleep to finish the presplash.
renpy.display.presplash.sleep()
# Re-Initialize the log.
game.log = renpy.python.RollbackLog()
# Switch contexts, begin logging.
game.contexts = [ renpy.execution.Context(True) ]
# Jump to an appropriate start label.
if game.script.has_label("_start"):
start_label = '_start'
else:
start_label = 'start'
game.context().goto_label(start_label)
try:
renpy.exports.log("--- " + time.ctime())
renpy.exports.log("")
except:
pass
# Note if this is a restart.
renpy.store._restart = restart
# We run until we get an exception.
renpy.display.interface.enter_context()
log_clock("Running {}".format(start_label))
renpy.execution.run_context(True)
def load_rpe(fn):
zfn = zipfile.ZipFile(fn)
autorun = zfn.read("autorun.py")
zfn.close()
sys.path.insert(0, fn)
exec autorun in dict()
def choose_variants():
if "RENPY_VARIANT" in os.environ:
renpy.config.variants = list(os.environ["RENPY_VARIANT"].split()) + [ None ]
return
renpy.config.variants = [ None ]
if renpy.android: # @UndefinedVariable
renpy.config.variants.insert(0, 'mobile')
renpy.config.variants.insert(0, 'android')
import android # @UnresolvedImport
import math
import pygame_sdl2 as pygame
from jnius import autoclass # @UnresolvedImport
# Manufacturer/Model-specific variants.
try:
Build = autoclass("android.os.Build")
manufacturer = Build.MANUFACTURER
model = Build.MODEL
print("Manufacturer", manufacturer, "model", model)
if manufacturer == "Amazon" and model.startswith("AFT"):
print("Running on a Fire TV.")
renpy.config.variants.insert(0, "firetv")
except:
pass
# Are we running on OUYA or Google TV or something similar?
package_manager = android.activity.getPackageManager()
if package_manager.hasSystemFeature("android.hardware.type.television"):
print("Running on a television.")
renpy.config.variants.insert(0, "tv")
renpy.config.variants.insert(0, "small")
return
# Otherwise, a phone or tablet.
renpy.config.variants.insert(0, 'touch')
pygame.display.init()
info = renpy.display.get_info()
diag = math.hypot(info.current_w, info.current_h) / android.get_dpi()
print("Screen diagonal is", diag, "inches.")
if diag >= 6:
renpy.config.variants.insert(0, 'tablet')
renpy.config.variants.insert(0, 'medium')
else:
renpy.config.variants.insert(0, 'phone')
renpy.config.variants.insert(0, 'small')
elif renpy.ios:
renpy.config.variants.insert(0, 'ios')
renpy.config.variants.insert(0, 'touch')
from pyobjus import autoclass # @UnresolvedImport @Reimport
UIDevice = autoclass("UIDevice")
idiom = UIDevice.currentDevice().userInterfaceIdiom
print("iOS device idiom", idiom)
# idiom 0 is iPhone, 1 is iPad. We assume any bigger idiom will
# be tablet-like.
if idiom >= 1:
renpy.config.variants.insert(0, 'tablet')
renpy.config.variants.insert(0, 'medium')
else:
renpy.config.variants.insert(0, 'phone')
renpy.config.variants.insert(0, 'small')
else:
renpy.config.variants.insert(0, 'pc')
renpy.config.variants.insert(0, 'large')
def main():
log_clock("Bootstrap to the start of init.init")
renpy.game.exception_info = 'Before loading the script.'
# Get ready to accept new arguments.
renpy.arguments.pre_init()
# Init the screen language parser.
renpy.sl2.slparser.init()
# Init the config after load.
renpy.config.init()
# Set up variants.
choose_variants()
renpy.display.touch = "touch" in renpy.config.variants
log_clock("Early init")
# Note the game directory.
game.basepath = renpy.config.gamedir
renpy.config.searchpath = [ renpy.config.gamedir ]
# Find the common directory.
commondir = __main__.path_to_common(renpy.config.renpy_base) # E1101 @UndefinedVariable
if os.path.isdir(commondir):
renpy.config.searchpath.append(commondir)
renpy.config.commondir = commondir
else:
renpy.config.commondir = None
# Add path from env variable, if any
if "RENPY_SEARCHPATH" in os.environ:
renpy.config.searchpath.extend(os.environ["RENPY_SEARCHPATH"].split("::"))
if renpy.android:
renpy.config.searchpath = [ ]
renpy.config.commondir = None
if "ANDROID_PUBLIC" in os.environ:
android_game = os.path.join(os.environ["ANDROID_PUBLIC"], "game")
print("Android searchpath: ", android_game)
if os.path.exists(android_game):
renpy.config.searchpath.insert(0, android_game)
# Load Ren'Py extensions.
for dir in renpy.config.searchpath: # @ReservedAssignment
for fn in os.listdir(dir):
if fn.lower().endswith(".rpe"):
load_rpe(dir + "/" + fn)
# The basename is the final component of the path to the gamedir.
for i in sorted(os.listdir(renpy.config.gamedir)):
if not i.endswith(".rpa"):
continue
i = i[:-4]
renpy.config.archives.append(i)
renpy.config.archives.reverse()
# Initialize archives.
renpy.loader.index_archives()
# Start auto-loading.
renpy.loader.auto_init()
log_clock("Loader init")
# Initialize the log.
game.log = renpy.python.RollbackLog()
# Initialize the store.
renpy.store.store = sys.modules['store']
# Set up styles.
game.style = renpy.style.StyleManager() # @UndefinedVariable
renpy.store.style = game.style
# Run init code in its own context. (Don't log.)
game.contexts = [ renpy.execution.Context(False) ]
game.contexts[0].init_phase = True
renpy.execution.not_infinite_loop(60)
# Load the script.
renpy.game.exception_info = 'While loading the script.'
renpy.game.script = renpy.script.Script()
if renpy.session.get("compile", False):
renpy.game.args.compile = True
# Set up error handling.
renpy.exports.load_module("_errorhandling")
if renpy.exports.loadable("tl/None/common.rpym") or renpy.exports.loadable("tl/None/common.rpymc"):
renpy.exports.load_module("tl/None/common")
renpy.config.init_system_styles()
renpy.style.build_styles() # @UndefinedVariable
log_clock("Loading error handling")
# If recompiling everything, remove orphan .rpyc files.
# Otherwise, loading will fail if an orphan .rpyc has the same
# labels as another script (usually happens on script rename).
if (renpy.game.args.command == 'compile') and not (renpy.game.args.keep_orphan_rpyc): # @UndefinedVariable
for (fn, dn) in renpy.game.script.script_files:
if dn is None:
continue
if not os.path.isfile(os.path.join(dn, fn + ".rpy")):
try:
name = os.path.join(dn, fn + ".rpyc")
os.rename(name, name + ".bak")
except OSError:
# This perhaps shouldn't happen since either .rpy or .rpyc should exist
pass
# Update script files list, so that it doesn't contain removed .rpyc's
renpy.loader.cleardirfiles()
renpy.game.script.scan_script_files()
# Load all .rpy files.
renpy.game.script.load_script() # sets renpy.game.script.
log_clock("Loading script")
if renpy.game.args.command == 'load-test': # @UndefinedVariable
start = time.time()
for i in range(5):
print(i)
renpy.game.script = renpy.script.Script()
renpy.game.script.load_script()
print(time.time() - start)
sys.exit(0)
renpy.game.exception_info = 'After loading the script.'
# Find the save directory.
if renpy.config.savedir is None:
renpy.config.savedir = __main__.path_to_saves(renpy.config.gamedir) # E1101 @UndefinedVariable
if renpy.game.args.savedir: # @UndefinedVariable
renpy.config.savedir = renpy.game.args.savedir # @UndefinedVariable
# Init preferences.
game.persistent = renpy.persistent.init()
game.preferences = game.persistent._preferences
for i in renpy.game.persistent._seen_translates: # @UndefinedVariable
if i in renpy.game.script.translator.default_translates:
renpy.game.seen_translates_count += 1
if game.persistent._virtual_size:
renpy.config.screen_width, renpy.config.screen_height = game.persistent._virtual_size
# Init save locations and loadsave.
renpy.savelocation.init()
# We need to be 100% sure we kill the savelocation thread.
try:
# Init save slots.
renpy.loadsave.init()
log_clock("Loading save slot metadata.")
# Load persistent data from all save locations.
renpy.persistent.update()
game.preferences = game.persistent._preferences
log_clock("Loading persistent")
# Clear the list of seen statements in this game.
game.seen_session = { }
# Initialize persistent variables.
renpy.store.persistent = game.persistent
renpy.store._preferences = game.preferences
renpy.store._test = renpy.test.testast._test
if renpy.parser.report_parse_errors():
raise renpy.game.ParseErrorException()
renpy.game.exception_info = 'While executing init code:'
for _prio, node in game.script.initcode:
if isinstance(node, renpy.ast.Node):
renpy.game.context().run(node)
else:
# An init function.
node()
renpy.game.exception_info = 'After initialization, but before game start.'
# Check if we should simulate android.
renpy.android = renpy.android or renpy.config.simulate_android # @UndefinedVariable
# Re-set up the logging.
renpy.log.post_init()
# Run the post init code, if any.
for i in renpy.game.post_init:
i()
renpy.game.script.report_duplicate_labels()
# Sort the images.
renpy.display.image.image_names.sort()
game.persistent._virtual_size = renpy.config.screen_width, renpy.config.screen_height
log_clock("Running init code")
renpy.pyanalysis.load_cache()
log_clock("Loading analysis data")
# Analyze the script and compile ATL.
renpy.game.script.analyze()
renpy.atl.compile_all()
log_clock("Analyze and compile ATL")
# Index the archive files. We should not have loaded an image
# before this point. (As pygame will not have been initialized.)
# We need to do this again because the list of known archives
# may have changed.
renpy.loader.index_archives()
log_clock("Index archives")
# Check some environment variables.
renpy.game.less_memory = "RENPY_LESS_MEMORY" in os.environ
renpy.game.less_mouse = "RENPY_LESS_MOUSE" in os.environ
renpy.game.less_updates = "RENPY_LESS_UPDATES" in os.environ
renpy.dump.dump(False)
renpy.game.script.make_backups()
log_clock("Dump and make backups.")
# Initialize image cache.
renpy.display.im.cache.init()
log_clock("Cleaning cache")
# Make a clean copy of the store.
renpy.python.make_clean_stores()
log_clock("Making clean stores")
gc.collect()
if renpy.config.manage_gc:
gc.set_threshold(*renpy.config.gc_thresholds)
gc_debug = int(os.environ.get("RENPY_GC_DEBUG", 0))
if renpy.config.gc_print_unreachable:
gc_debug |= gc.DEBUG_SAVEALL
gc.set_debug(gc_debug)
log_clock("Initial gc.")
# Start debugging file opens.
renpy.debug.init_main_thread_open()
# (Perhaps) Initialize graphics.
if not game.interface:
renpy.display.core.Interface()
log_clock("Creating interface object")
# Start things running.
restart = None
while True:
if restart:
renpy.display.screen.before_restart()
try:
try:
run(restart)
finally:
restart = (renpy.config.end_game_transition, "_invoke_main_menu", "_main_menu")
renpy.persistent.update(True)
except game.FullRestartException as e:
restart = e.reason
finally:
# Flush any pending interface work.
renpy.display.interface.finish_pending()
# Give Ren'Py a couple of seconds to finish saving.
renpy.loadsave.autosave_not_running.wait(3.0)
finally:
gc.set_debug(0)
renpy.loader.auto_quit()
renpy.savelocation.quit()
renpy.translation.write_updated_strings()
# This is stuff we do on a normal, non-error return.
if not renpy.display.error.error_handled:
renpy.display.render.check_at_shutdown()
```
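load_rpe() above treats a .rpe extension as a plain ZIP archive: its autorun.py member is executed at startup, and the archive itself is pushed onto sys.path so that other modules inside it become importable. A minimal, hypothetical sketch of producing such an archive (assuming the game directory is `game/`, which is on the search path scanned for .rpe files):

```python
import zipfile

# Hypothetical autorun script; load_rpe() will exec this text at startup.
AUTORUN = 'print("my_extension loaded")\n'

with zipfile.ZipFile("game/my_extension.rpe", "w") as zf:
    zf.writestr("autorun.py", AUTORUN)
    # Extra modules in the archive are importable afterwards, because
    # load_rpe() inserts the .rpe path at the front of sys.path.
    zf.writestr("my_extension_lib.py", "VERSION = 1\n")
```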
#### File: T PC Windows Version/renpy/screenlang.py
```python
from __future__ import print_function
import renpy.display
import contextlib
# Grab the Python version of the ast module.
ast = __import__("ast")
# The filename of the file we're parsing.
filename = None
new_variable_serial = 0
# Returns the name of a new variable.
@contextlib.contextmanager
def new_variable():
global new_variable_serial
new_variable_serial += 1
yield "_%d" % new_variable_serial
new_variable_serial -= 1
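# Hedged worked example: the outermost `with new_variable() as a:` yields "_1",
# a block nested inside it yields "_2", and the serial is decremented again on
# exit, so sibling blocks can reuse the same temporary names without clashing
# with an enclosing block.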
def increment_lineno(node, amount):
for node in ast.walk(node):
if hasattr(node, 'lineno'):
node.lineno += amount
class LineNumberNormalizer(ast.NodeVisitor):
def __init__(self):
self.last_line = 1
def generic_visit(self, node):
if hasattr(node, 'lineno'):
self.last_line = max(self.last_line, node.lineno)
node.lineno = self.last_line
super(LineNumberNormalizer, self).generic_visit(node)
##############################################################################
# Parsing.
# The parser that things are being added to.
parser = None
class Positional(object):
"""
This represents a positional parameter to a function.
"""
def __init__(self, name):
self.name = name
if parser:
parser.add(self)
class Keyword(object):
"""
This represents an optional keyword parameter to a function.
"""
def __init__(self, name):
self.name = name
if parser:
parser.add(self)
STYLE_PREFIXES = [
'',
'insensitive_',
'hover_',
'idle_',
'activate_',
'selected_',
'selected_insensitive_',
'selected_hover_',
'selected_idle_',
'selected_activate_',
]
class Style(object):
"""
This represents a style parameter to a function.
"""
def __init__(self, name):
self.name = name
if parser:
parser.add(self)
class PrefixStyle(object):
"""
This represents a prefixed style parameter to a function.
"""
def __init__(self, prefix, name):
self.prefix = prefix
self.name = name
if parser:
parser.add(self)
class Parser(object):
def __init__(self, name):
# The name of this object.
self.name = name
# The positional arguments, keyword arguments, and child
# statements of this statement.
self.positional = [ ]
self.keyword = { }
self.children = { }
all_statements.append(self)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def add(self, i):
"""
Adds a clause to this parser.
"""
if isinstance(i, list):
for j in i:
self.add(j)
return
if isinstance(i, Positional):
self.positional.append(i)
elif isinstance(i, Keyword):
self.keyword[i.name] = i
elif isinstance(i, Style):
for j in STYLE_PREFIXES:
self.keyword[j + i.name] = i
elif isinstance(i, PrefixStyle):
for j in STYLE_PREFIXES:
self.keyword[i.prefix + j + i.name] = i
elif isinstance(i, Parser):
self.children[i.name] = i
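# Hedged example (hypothetical statement): while a statement's parser is
# current, Style("background") registers "background", "hover_background",
# "selected_idle_background" and every other STYLE_PREFIXES combination as
# keyword arguments of that statement; PrefixStyle("text_", "size") does the
# same with an extra prefix, e.g. "text_hover_size".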
def parse_statement(self, l, name, layout_mode=False):
word = l.word() or l.match(r'\$')
if word and word in self.children:
if layout_mode:
c = self.children[word].parse_layout(l, name)
else:
c = self.children[word].parse(l, name)
return c
else:
return None
def parse_layout(self, l, name):
l.error("The %s statement cannot be used as a container for the has statement." % self.name)
def parse_children(self, stmt, l, name):
l.expect_block(stmt)
l = l.subblock_lexer()
rv = [ ]
with new_variable() as child_name:
count = 0
while l.advance():
if len(l.block) != 1:
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count), l.number))
else:
child_name = name
c = self.parse_statement(l, child_name)
if c is None:
l.error('Expected screen language statement.')
rv.extend(c)
count += 1
return rv
def parse_eval(self, expr, lineno=1):
"""
Parses an expression for eval, and then strips off the module
and expr instances, and adjusts the line number.
"""
if isinstance(expr, unicode):
expr = renpy.python.escape_unicode(expr)
try:
rv = ast.parse(expr, 'eval').body[0].value
except SyntaxError as e:
raise renpy.parser.ParseError(
filename,
lineno + e[1][1] - 1,
"Syntax error while parsing python expression.",
e[1][3],
e[1][2])
increment_lineno(rv, lineno-1)
return rv
def parse_exec(self, code, lineno=1):
"""
Parses an expression for exec, then strips off the module and
adjusts the line number. Returns a list of statements.
"""
if isinstance(code, unicode):
code = renpy.python.escape_unicode(code)
try:
rv = ast.parse(code, 'exec')
except SyntaxError as e:
raise renpy.parser.ParseError(
filename,
lineno + e[1][1] - 1,
"Syntax error while parsing python code.",
e[1][3],
e[1][2])
increment_lineno(rv, lineno-1)
return rv.body
def parse_simple_expression(self, l):
lineno = l.number
expr = l.require(l.simple_expression)
return self.parse_eval(expr, lineno)
def parse_comma_expression(self, l):
lineno = l.number
expr = l.require(l.comma_expression)
return self.parse_eval(expr, lineno)
def parse(self, l, name):
"""
This is expected to parse a function statement, and to return
a list of python ast statements.
`l` the lexer.
`name` the name of the variable containing the name of the
current statement.
"""
raise Exception("Not Implemented")
# A singleton value.
many = renpy.object.Sentinel("many")
class FunctionStatementParser(Parser):
"""
This is responsible for parsing function statements.
"""
def __init__(self, name, function, nchildren=0, unevaluated=False, scope=False):
super(FunctionStatementParser, self).__init__(name)
# Functions that are called when this statement runs.
self.function = function
# The number of children we have.
self.nchildren = nchildren
# True if we should evaluate arguments and children. False
# if we should just pass them into our child.
self.unevaluated = unevaluated
# Add us to the appropriate lists.
global parser
parser = self
if nchildren != 0:
childbearing_statements.append(self)
self.scope = scope
def parse_layout(self, l, name):
return self.parse(l, name, True)
def parse(self, l, name, layout_mode=False):
# The list of nodes this function returns.
rv = [ ]
# The line number of the current node.
lineno = l.number
if layout_mode and self.nchildren == 0:
l.error("The %s statement cannot be used as a layout." % self.name)
func = self.parse_eval(self.function, lineno)
call_node = ast.Call(
lineno=lineno,
col_offset=0,
func=func,
args=[ ],
keywords=[ ],
starargs=None,
kwargs=None,
)
seen_keywords = set()
# Parses a keyword argument from the lexer.
def parse_keyword(l, expect):
name = l.word()
if name is None:
l.error(expect)
if name not in self.keyword:
l.error('%r is not a keyword argument or valid child for the %s statement.' % (name, self.name))
if name in seen_keywords:
l.error('keyword argument %r appears more than once in a %s statement.' % (name, self.name))
seen_keywords.add(name)
expr = self.parse_comma_expression(l)
call_node.keywords.append(
ast.keyword(arg=str(name), value=expr),
)
# We assume that the initial keyword has been parsed already,
# so we start with the positional arguments.
for _i in self.positional:
call_node.args.append(self.parse_simple_expression(l))
# Next, we allow keyword arguments on the starting line.
while True:
if l.match(':'):
l.expect_eol()
l.expect_block(self.name)
block = True
break
if l.eol():
l.expect_noblock(self.name)
block = False
break
parse_keyword(l, "expected a keyword argument, colon, or end of line.")
rv.append(ast.Expr(value=call_node))
if self.nchildren == 1:
rv.extend(self.parse_exec('ui.child_or_fixed()'))
needs_close = (self.nchildren != 0)
# The index of the child we're adding to this statement.
child_index = 0
# A list of lexers we need to parse the contents of.
lexers = [ ]
if block:
lexers.append(l.subblock_lexer())
if layout_mode:
lexers.append(l)
# The variable we store the child's name in.
with new_variable() as child_name:
# If we have a block, parse it. This also takes care of parsing the
# block of a has clause.
for l in lexers:
while l.advance():
state = l.checkpoint()
if l.keyword(r'has'):
if self.nchildren != 1:
l.error("The %s statement does not take a layout." % self.name)
if child_index != 0:
l.error("The has statement may not be given after a child has been supplied.")
c = self.parse_statement(l, child_name, layout_mode=True)
if c is None:
l.error('Has expects a child statement.')
# Remove the call to child_or_fixed.
rv.pop()
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, child_index)))
rv.extend(c)
needs_close = False
continue
c = self.parse_statement(l, child_name)
if c is not None:
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, child_index)))
rv.extend(c)
child_index += 1
continue
l.revert(state)
if not l.eol():
parse_keyword(l, "expected a keyword argument or child statement.")
while not l.eol():
parse_keyword(l, "expected a keyword argument or end of line.")
if needs_close:
rv.extend(self.parse_exec("ui.close()"))
if "id" not in seen_keywords:
call_node.keywords.append(ast.keyword(arg="id", value=self.parse_eval(name, lineno)))
if "scope" not in seen_keywords and self.scope:
call_node.keywords.append(ast.keyword(arg="scope", value=self.parse_eval("_scope", lineno)))
return rv
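# Note added for clarity (not part of the original source): self.positional and
# self.keyword are populated by the Positional/Keyword/Style declarations used
# in the statement definitions below; parse() consumes one simple expression per
# positional and turns each recognized keyword into an ast.keyword node on the
# generated ui.* call.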
##############################################################################
# Definitions of screen language statements.
# Used to allow statements to take styles.
styles = [ ]
# All statements defined, and statements that take children.
all_statements = [ ]
childbearing_statements = [ ]
position_property_names = [
"anchor",
"xanchor",
"yanchor",
"pos",
"xpos",
"ypos",
"align",
"xalign",
"yalign",
"xoffset",
"yoffset",
"maximum",
"xmaximum",
"ymaximum",
"area",
"clipping",
"xfill",
"yfill",
# no center, since it can conflict with the center transform.
"xcenter",
"ycenter",
"xsize",
"ysize",
"xysize",
"alt",
"debug",
]
position_properties = [ Style(i) for i in position_property_names ]
text_position_properties = [ PrefixStyle("text_", i) for i in position_property_names ]
side_position_properties = [ PrefixStyle("side_", i) for i in position_property_names ]
text_property_names = [
"antialias",
"vertical",
"black_color",
"bold",
"color",
"drop_shadow",
"drop_shadow_color",
"first_indent",
"font",
"size",
"hyperlink_functions",
"italic",
"justify",
"kerning",
"language",
"layout",
"line_leading",
"line_spacing",
"minwidth",
"min_width",
"newline_indent",
"outlines",
"rest_indent",
"ruby_style",
"slow_cps",
"slow_cps_multiplier",
"slow_abortable",
"strikethrough",
"text_align",
"text_y_fudge",
"underline",
"minimum",
"xminimum",
"yminimum",
]
text_properties = [ Style(i) for i in text_property_names ]
text_text_properties = [ PrefixStyle("text_", i) for i in text_property_names ]
window_properties = [ Style(i) for i in [
"background",
"foreground",
"left_margin",
"right_margin",
"bottom_margin",
"top_margin",
"xmargin",
"ymargin",
"left_padding",
"right_padding",
"top_padding",
"bottom_padding",
"xpadding",
"ypadding",
"size_group",
"minimum",
"xminimum",
"yminimum",
] ]
button_properties = [ Style(i) for i in [
"sound",
"mouse",
"focus_mask",
"child",
"keyboard_focus",
] ]
bar_properties = [ Style(i) for i in [
"bar_vertical",
"bar_invert",
"bar_resizing",
"left_gutter",
"right_gutter",
"top_gutter",
"bottom_gutter",
"left_bar",
"right_bar",
"top_bar",
"bottom_bar",
"thumb",
"thumb_shadow",
"thumb_offset",
"mouse",
"unscrollable",
"keyboard_focus",
] ]
box_properties = [ Style(i) for i in [
"box_layout",
"box_wrap",
"box_wrap_spacing",
"box_reverse",
"order_reverse",
"spacing",
"first_spacing",
"fit_first",
"minimum",
"xminimum",
"yminimum",
] ]
ui_properties = [
Keyword("at"),
Keyword("id"),
Keyword("style"),
Keyword("style_group"),
Keyword("focus"),
Keyword("default"),
]
def add(thing):
parser.add(thing)
##############################################################################
# UI statements.
FunctionStatementParser("null", "ui.null", 0)
Keyword("width")
Keyword("height")
add(ui_properties)
add(position_properties)
FunctionStatementParser("text", "ui.text", 0, scope=True)
Positional("text")
Keyword("slow")
Keyword("slow_done")
Keyword("substitute")
Keyword("scope")
add(ui_properties)
add(position_properties)
add(text_properties)
FunctionStatementParser("hbox", "ui.hbox", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("vbox", "ui.vbox", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("fixed", "ui.fixed", many)
add(ui_properties)
add(position_properties)
add(box_properties)
FunctionStatementParser("grid", "ui.grid", many)
Positional("cols")
Positional("rows")
Keyword("transpose")
Style("spacing")
add(ui_properties)
add(position_properties)
FunctionStatementParser("side", "ui.side", many)
Positional("positions")
Style("spacing")
add(ui_properties)
add(position_properties)
# Omit sizer, as we can always just put an xmaximum and ymaximum on an item.
for name in [ "window", "frame" ]:
FunctionStatementParser(name, "ui." + name, 1)
add(ui_properties)
add(position_properties)
add(window_properties)
FunctionStatementParser("key", "ui.key", 0)
Positional("key")
Keyword("action")
FunctionStatementParser("timer", "ui.timer", 0)
Positional("delay")
Keyword("action")
Keyword("repeat")
# Omit behaviors.
# Omit menu as being too high-level.
FunctionStatementParser("input", "ui.input", 0)
Keyword("default")
Keyword("length")
Keyword("allow")
Keyword("exclude")
Keyword("copypaste")
Keyword("prefix")
Keyword("suffix")
Keyword("changed")
Keyword("pixel_width")
add(ui_properties)
add(position_properties)
add(text_properties)
FunctionStatementParser("image", "ui.image", 0)
Positional("im")
# Omit imagemap_compat for being too high level (and obsolete).
FunctionStatementParser("button", "ui.button", 1)
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("alternate")
Keyword("selected")
Keyword("sensitive")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("imagebutton", "ui.imagebutton", 0)
Keyword("auto")
Keyword("idle")
Keyword("hover")
Keyword("insensitive")
Keyword("selected_idle")
Keyword("selected_hover")
Keyword("selected_insensitive")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("alternate")
Keyword("image_style")
Keyword("selected")
Keyword("sensitive")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("textbutton", "ui.textbutton", 0, scope=True)
Positional("label")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("alternate")
Keyword("text_style")
Keyword("substitute")
Keyword("scope")
Keyword("selected")
Keyword("sensitive")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
add(text_position_properties)
add(text_text_properties)
FunctionStatementParser("label", "ui.label", 0, scope=True)
Positional("label")
Keyword("text_style")
add(ui_properties)
add(position_properties)
add(window_properties)
add(text_position_properties)
add(text_text_properties)
for name in [ "bar", "vbar" ]:
FunctionStatementParser(name, "ui." + name, 0)
Keyword("adjustment")
Keyword("range")
Keyword("value")
Keyword("changed")
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
add(bar_properties)
# Omit autobar. (behavior)
FunctionStatementParser("viewport", "ui.viewport", 1)
Keyword("child_size")
Keyword("mousewheel")
Keyword("arrowkeys")
Keyword("draggable")
Keyword("edgescroll")
Keyword("xadjustment")
Keyword("yadjustment")
Keyword("xinitial")
Keyword("yinitial")
Keyword("scrollbars")
PrefixStyle("side_", "spacing")
add(ui_properties)
add(position_properties)
add(side_position_properties)
# Omit conditional. (behavior)
FunctionStatementParser("imagemap", "ui.imagemap", many)
Keyword("ground")
Keyword("hover")
Keyword("insensitive")
Keyword("idle")
Keyword("selected_hover")
Keyword("selected_idle")
Keyword("selected_insensitive")
Keyword("auto")
Keyword("alpha")
Keyword("cache")
add(ui_properties)
add(position_properties)
FunctionStatementParser("hotspot", "ui.hotspot_with_child", 1)
Positional("spot")
Keyword("action")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
add(ui_properties)
add(position_properties)
add(window_properties)
add(button_properties)
FunctionStatementParser("hotbar", "ui.hotbar", 0)
Positional("spot")
Keyword("adjustment")
Keyword("range")
Keyword("value")
add(ui_properties)
add(position_properties)
add(bar_properties)
FunctionStatementParser("transform", "ui.transform", 1)
Keyword("at")
Keyword("id")
for i in renpy.atl.PROPERTIES:
Style(i)
FunctionStatementParser("add", "ui.add", 0)
Positional("im")
Keyword("at")
Keyword("id")
for i in renpy.atl.PROPERTIES:
Style(i)
FunctionStatementParser("on", "ui.on", 0)
Positional("event")
Keyword("action")
FunctionStatementParser("drag", "ui.drag", 1)
Keyword("drag_name")
Keyword("draggable")
Keyword("droppable")
Keyword("drag_raise")
Keyword("dragged")
Keyword("dropped")
Keyword("drop_allowable")
Keyword("drag_handle")
Keyword("drag_joined")
Keyword("clicked")
Keyword("hovered")
Keyword("unhovered")
Keyword("mouse_drop")
Style("child")
add(ui_properties)
add(position_properties)
FunctionStatementParser("draggroup", "ui.draggroup", many)
Keyword("min_overlap")
add(ui_properties)
add(position_properties)
FunctionStatementParser("mousearea", "ui.mousearea", 0)
Keyword("hovered")
Keyword("unhovered")
Style("focus_mask")
add(ui_properties)
add(position_properties)
##############################################################################
# Control-flow statements.
class PassParser(Parser):
def __init__(self, name):
super(PassParser, self).__init__(name)
def parse(self, l, name):
return self.parse_exec("pass", l.number)
PassParser("pass")
class DefaultParser(Parser):
def __init__(self, name):
super(DefaultParser, self).__init__(name)
def parse(self, l, name):
name = l.require(l.word)
l.require(r'=')
rest = l.rest()
code = "_scope.setdefault(%r, (%s))" % (name, rest)
return self.parse_exec(code, l.number)
DefaultParser("default")
class UseParser(Parser):
def __init__(self, name):
super(UseParser, self).__init__(name)
childbearing_statements.append(self)
def parse(self, l, name):
lineno = l.number
target_name = l.require(l.word)
code = "renpy.use_screen(%r" % target_name
args = renpy.parser.parse_arguments(l)
if args:
for k, v in args.arguments:
if k is None:
code += ", (%s)" % v
else:
code += ", %s=(%s)" % (k, v)
code += ", _name=%s, _scope=_scope" % name
if args:
if args.extrapos:
code += ", *(%s)" % args.extrapos
if args.extrakw:
code += ", **(%s)" % args.extrakw
code += ")"
return self.parse_exec(code, lineno)
UseParser("use")
class IfParser(Parser):
def __init__(self, name):
super(IfParser, self).__init__(name)
childbearing_statements.append(self)
def parse(self, l, name):
with new_variable() as child_name:
count = 0
lineno = l.number
condition = self.parse_eval(l.require(l.python_expression), lineno)
l.require(':')
l.expect_eol()
body = self.parse_exec("%s = (%s, %d)" % (child_name, name, count))
body.extend(self.parse_children('if', l, child_name))
orelse = [ ]
rv = ast.If(test=condition, body=body, orelse=orelse, lineno=lineno, col_offset=0)
count += 1
state = l.checkpoint()
while l.advance():
old_orelse = orelse
lineno = l.number
if l.keyword("elif"):
condition = self.parse_eval(l.require(l.python_expression), lineno)
body = self.parse_exec("%s = (%s, %d)" % (child_name, name, count))
body.extend(self.parse_children('if', l, child_name))
orelse = [ ]
old_orelse.append(ast.If(test=condition, body=body, orelse=orelse, lineno=lineno, col_offset=0))
count += 1
state = l.checkpoint()
elif l.keyword("else"):
old_orelse.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count)))
old_orelse.extend(self.parse_children('if', l, child_name))
break
else:
l.revert(state)
break
return [ rv ]
IfParser("if")
class ForParser(Parser):
def __init__(self, name):
super(ForParser, self).__init__(name)
childbearing_statements.append(self)
def parse_tuple_pattern(self, l):
is_tuple = False
pattern = [ ]
while True:
lineno = l.number
if l.match(r"\("):
p = self.parse_tuple_pattern(l)
else:
p = l.name().encode("utf-8")
if not p:
break
pattern.append(ast.Name(id=p, ctx=ast.Store(), lineno=lineno, col_offset=0))
if l.match(r","):
is_tuple = True
else:
break
if not pattern:
l.error("Expected tuple pattern.")
if not is_tuple:
return pattern[0]
else:
return ast.Tuple(elts=pattern, ctx=ast.Store())
def parse(self, l, name):
lineno = l.number
pattern = self.parse_tuple_pattern(l)
l.require('in')
expression = self.parse_eval(l.require(l.python_expression), l.number)
l.require(':')
l.expect_eol()
with new_variable() as counter_name:
with new_variable() as child_name:
children = self.parse_exec("%s = (%s, %s)" % (child_name, name, counter_name))
children.extend(self.parse_children('for', l, child_name))
children.extend(self.parse_exec("%s += 1" % counter_name))
rv = self.parse_exec("%s = 0" % counter_name)
rv.append(ast.For(
target=pattern,
iter=expression,
body=children,
orelse=[],
lineno=lineno,
col_offset=0))
return rv
ForParser("for")
class PythonParser(Parser):
def __init__(self, name, one_line):
super(PythonParser, self).__init__(name)
self.one_line = one_line
def parse(self, l, name):
lineno = l.number
if self.one_line:
python_code = l.rest()
l.expect_noblock('one-line python statement')
else:
l.require(':')
l.expect_block('python block')
python_code = l.python_block()
lineno += 1
return self.parse_exec(python_code, lineno)
PythonParser("$", True)
PythonParser("python", False)
##############################################################################
# Add all_statements to the statements that take children.
for i in childbearing_statements:
i.add(all_statements)
##############################################################################
# Definition of the screen statement.
# class ScreenFunction(renpy.object.Object):
# def __init__(self, children):
# self.children = children
# def __call__(self, _name=(), _scope=None, **kwargs):
# for i, child in enumerate(self.children):
# child.evaluate(_name + (i,), _scope)
# def screen_function(positional, keyword, children):
# name = renpy.python.py_eval(positional[0].source)
# function = ScreenFunction(children)
# values = {
# "name" : name,
# "function" : function,
# }
# for k, v in keyword.iteritems():
# values[k] = renpy.python.py_eval(v.source)
# return values
# screen_stmt = FunctionStatementParser("screen", screen_function, unevaluated=True)
# Positional("name", Word)
# Keyword("modal", Expression)
# Keyword("zorder", Expression)
# Keyword("tag", Word)
# add(all_statements)
class ScreenLangScreen(renpy.object.Object):
"""
This represents a screen defined in the screen language.
"""
__version__ = 1
variant = "None"
# Predict should be false for screens created before
# prediction existed.
predict = "False"
parameters = None
location = None
def __init__(self):
# The name of the screen.
self.name = name
# Should this screen be declared as modal?
self.modal = "False"
# The screen's zorder.
self.zorder = "0"
# The screen's tag.
self.tag = None
# The PyCode object containing the screen's code.
self.code = None
# The variant of screen we're defining.
self.variant = "None" # expr.
# Should we predict this screen?
self.predict = "None" # expr.
# The parameters this screen takes.
self.parameters = None
def after_upgrade(self, version):
if version < 1:
self.modal = "False"
self.zorder = "0"
def define(self, location):
"""
Defines a screen.
"""
renpy.display.screen.define_screen(
self.name,
self,
modal=self.modal,
zorder=self.zorder,
tag=self.tag,
variant=renpy.python.py_eval(self.variant),
predict=renpy.python.py_eval(self.predict),
parameters=self.parameters,
location=self.location,
)
def __call__(self, *args, **kwargs):
scope = kwargs["_scope"]
if self.parameters:
args = scope.get("_args", ())
kwargs = scope.get("_kwargs", { })
values = renpy.ast.apply_arguments(self.parameters, args, kwargs)
scope.update(values)
renpy.python.py_exec_bytecode(self.code.bytecode, locals=scope)
class ScreenParser(Parser):
def __init__(self):
super(ScreenParser, self).__init__("screen")
def parse(self, l, name="_name"):
location = l.get_location()
screen = ScreenLangScreen()
def parse_keyword(l):
if l.match('modal'):
screen.modal = l.require(l.simple_expression)
return True
if l.match('zorder'):
screen.zorder = l.require(l.simple_expression)
return True
if l.match('tag'):
screen.tag = l.require(l.word)
return True
if l.match('variant'):
screen.variant = l.require(l.simple_expression)
return True
if l.match('predict'):
screen.predict = l.require(l.simple_expression)
return True
return False
lineno = l.number
screen.name = l.require(l.word)
screen.parameters = renpy.parser.parse_parameters(l)
while parse_keyword(l):
continue
l.require(':')
l.expect_eol()
l.expect_block('screen statement')
l = l.subblock_lexer()
rv = [ ]
count = 0
with new_variable() as child_name:
while l.advance():
if parse_keyword(l):
while parse_keyword(l):
continue
l.expect_eol()
continue
rv.extend(self.parse_exec("%s = (%s, %d)" % (child_name, name, count), l.number))
c = self.parse_statement(l, child_name)
if c is None:
l.error('Expected a screen language statement.')
rv.extend(c)
count += 1
node = ast.Module(body=rv, lineno=lineno, col_offset=0)
ast.fix_missing_locations(node)
LineNumberNormalizer().visit(node)
# Various bits of debugging code:
# print ast.dump(node, True, True)
# a = compile(node, 'foo', 'exec')
# import dis
# dis.dis(a)
# import unparse
# print
# print screen.name, "-----------------------------------------"
# unparse.Unparser(node)
screen.code = renpy.ast.PyCode(node, location, 'exec')
return screen
screen_parser = ScreenParser()
screen_parser.add(all_statements)
def parse_screen(l):
"""
Parses the screen statement.
"""
global filename
filename = l.filename
screen = screen_parser.parse(l)
return screen
```
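The parser above does not interpret the screen language directly: it builds Python `ast` nodes (via `parse_eval`/`parse_exec`), fixes up their line numbers, and compiles the result into a `PyCode` object that is later executed against `_scope`. A minimal, self-contained sketch of that technique follows; it uses only the standard `ast` module, and the names `_child` and `_name` are invented for illustration, not taken from Ren'Py.

```python
import ast

# Build the kind of statement parse_exec() produces, e.g. "_child = (_name, 0)".
module = ast.parse("_child = (_name, 0)", mode="exec")

# Shift line numbers, in the spirit of increment_lineno(rv, lineno - 1) above.
ast.increment_lineno(module, 41)
ast.fix_missing_locations(module)

# Compile and execute against a scope dictionary, much as the generated screen
# code is executed against _scope when the screen is shown.
scope = {"_name": ("my_screen",)}
exec(compile(module, "<screen>", "exec"), scope)
print(scope["_child"])   # (('my_screen',), 0)
```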
#### File: T PC Windows Version/renpy/ui.py
```python
from __future__ import print_function
import sys
import renpy.display
import renpy.text
from renpy.display.behavior import is_selected, is_sensitive
##############################################################################
# Special classes that can be subclassed from the outside.
class Action(renpy.object.Object):
"""
This can be passed to the clicked method of a button or hotspot. It is
called when the action is selected. The other methods determine if the
action should be displayed insensitive or disabled.
"""
# Alt text.
alt = None
def get_sensitive(self):
return True
def get_selected(self):
return False
def periodic(self, st):
return
def predict(self):
return
def __call__(self):
raise Exception("Not implemented")
class BarValue(renpy.object.Object):
"""
This can be passed to the value method of bar and hotbar.
"""
# Alt text.
alt = "Bar"
def replaces(self, other):
return
def periodic(self, st):
return
def get_adjustment(self):
raise Exception("Not implemented")
def get_style(self):
return "bar", "vbar"
##############################################################################
# Things we can add to. These have two methods: add is called with the
# widget we're adding. close is called when the thing is ready to be
# closed.
class Addable(object):
# A style_prefix associates with this addable.
style_prefix = None
def get_layer(self):
return Exception("Operation can only be performed on a layer.")
class Layer(Addable):
def __init__(self, name):
self.name = name
def add(self, d, key):
renpy.game.context(-1).scene_lists.add(self.name, d, key=key)
def close(self, d):
stack.pop()
if d and d != self.name:
raise Exception("ui.close closed layer %s, not the expected %r." % (self.name, d))
def get_layer(self):
return self.name
def __repr__(self):
return "<Layer: %r>" % self.name
class Many(Addable):
"""
A widget that takes many children.
"""
def __init__(self, displayable, imagemap, style_prefix):
self.displayable = displayable
self.imagemap = imagemap
self.style_prefix = style_prefix
def add(self, d, key):
self.displayable.add(d)
def close(self, d):
stack.pop()
if self.imagemap:
imagemap = imagemap_stack.pop()
imagemap.cache.finish()
if d and d != self.displayable:
raise Exception("ui.close closed %r, not the expected %r." % (self.displayable, d))
def __repr__(self):
return "<Many: %r>" % self.displayable
class One(Addable):
"""
A widget that expects exactly one child.
"""
def __init__(self, displayable, style_prefix):
self.displayable = displayable
self.style_prefix = style_prefix
def add(self, d, key):
self.displayable.add(d)
stack.pop()
def close(self, d):
raise Exception("Widget %r expects a child." % self.displayable)
def __repr__(self):
return "<One: %r>" % self.displayable
class Detached(Addable):
"""
Used to indicate a widget is detached from the stack.
"""
def __init__(self, style_prefix):
self.style_prefix = style_prefix
def add(self, d, key):
self.child = d
stack.pop()
def close(self, d):
raise Exception("Detached expects to be given a child.")
class ChildOrFixed(Addable):
"""
If one widget is added, then it is added directly to our
parent. Otherwise, a fixed is added to our parent, and all
the widgets are added to that.
"""
def __init__(self, style_prefix):
self.queue = [ ]
self.style_prefix = style_prefix
def add(self, d, key):
self.queue.append(d)
def close(self, d):
stack.pop()
if len(self.queue) == 1:
implicit_add(self.queue[0])
else:
fixed()
for i in self.queue:
implicit_add(i)
close()
if d is not None:
raise Exception("Did not expect to close %r." % d)
# A stack of things we can add to.
stack = [ ]
# A stack of open ui.ats.
at_stack = [ ]
# The tag for the displayable being added to the layer.
add_tag = None
# A stack of Imagemap objects.
imagemap_stack = [ ]
# Called at the end of the init phase, and from the screen
# prediction code.
def reset():
global stack
global at_stack
global imagemap_stack
stack = [ Layer('transient') ]
at_stack = [ ]
imagemap_stack = [ ]
renpy.game.post_init.append(reset)
def interact(type='misc', roll_forward=None, **kwargs): # @ReservedAssignment
"""
:doc: ui
:args: (roll_forward=None, mouse='default')
Causes an interaction with the user, and returns the result of that
interaction. This causes Ren'Py to redraw the screen and begin processing
input events. When a displayable returns a value in response to an event,
that value is returned from ui.interact, and the interaction ends.
This function is rarely called directly. It is usually called by other
parts of Ren'Py, including the say statement, menu statement, with statement,
pause statement, call screen, :func:`renpy.input`, among many other
functions. However, it can be called directly if necessary.
When an interaction ends, the transient layer and all screens shown with
transient=True are cleared from the scene lists.
The following arguments are documented. As other, undocumented arguments
exist for Ren'Py's internal use, please pass all arguments as keyword
arguments.
`roll_forward`
The information that will be returned by this function when a
roll forward occurs. (If None, the roll forward is ignored.) This
should usually be passed the result of the :func:`renpy.roll_forward_info`
function.
`mouse`
The style of mouse cursor to use during this function.
"""
if stack is None:
raise Exception("Interaction not allowed during init phase.")
if renpy.config.skipping == "fast":
renpy.config.skipping = None
if len(stack) != 1:
raise Exception("ui.interact called with non-empty widget/layer stack. Did you forget a ui.close() somewhere?\nStack was "+('\n'.join([str(item) for item in stack])))
if at_stack:
raise Exception("ui.interact called with non-empty at stack.")
renpy.game.context().info._current_interact_type = type
rv = renpy.game.interface.interact(roll_forward=roll_forward, **kwargs)
renpy.game.context().info._last_interact_type = type
if renpy.exports.in_fixed_rollback() and roll_forward is not None:
return roll_forward
else:
return rv
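# Usage note added for clarity (not part of the original source): a typical
# caller builds displayables on the transient layer and then starts the
# interaction, for example:
#
#     ui.saybehavior()
#     ui.interact(roll_forward=renpy.exports.roll_forward_info())
#
# Both functions exist in Ren'Py; the exact arguments depend on the caller.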
def tag(name):
global add_tag
add_tag = name
def child_or_fixed():
"""
Causes the current widget to be given child-fixed semantics. This
means that we will queue up children added to it. If there is one
child, that child will be added to the widget directly. Otherwise,
a fixed will be created, and the children will be added to that.
"""
stack.append(ChildOrFixed(stack[-1].style_prefix))
def remove(d):
layer = stack[-1].get_layer()
renpy.game.context(-1).scene_lists.remove(layer, d)
def remove_above(d):
layer = stack[-1].get_layer()
renpy.game.context(-1).scene_lists.remove_above(layer, d)
def at(transform):
"""
:doc: ui
Specifies a transform that is applied to the next displayable to
be created. This is largely obsolete, as all UI functions now take
an `at` argument.
"""
at_stack.append(transform)
def clear():
layer = stack[-1].get_layer()
renpy.game.context(-1).scene_lists.clear(layer)
def detached():
"""
:doc: ui
Do not add the next displayable to any layer or container. Use this if
you want to assign the result of a ui function to a variable.
"""
rv = Detached(stack[-1].style_prefix)
stack.append(rv)
return rv
def layer(name):
"""
:doc: ui
Adds displayables to the layer named `name`. The layer must be
closed with :func:`ui.close`.
"""
stack.append(Layer(name))
def close(d=None):
"""
:doc: ui
:args: ()
Closes a displayable created by a UI function. When a
displayable is closed, we add new displayables to its parent,
or to the layer if no displayable is open.
"""
stack[-1].close(d)
if not stack:
raise Exception("ui.close() called when no layer or widget is open.")
def reopen(w, clear):
stack.append(Many(w))
if clear:
w.children[:] = [ ]
def context_enter(w):
if isinstance(renpy.ui.stack[-1], renpy.ui.Many) and renpy.ui.stack[-1].displayable is w:
return
raise Exception("%r cannot be used as a context manager.", type(w).__name__)
def context_exit(w):
close(w)
NoStylePrefixGiven = renpy.object.Sentinel("NoStylePrefixGiven")
def combine_style(style_prefix, style_suffix):
"""
Combines a style prefix and style suffix to create a style name, then
returns the style object corresponding to that name.
"""
if style_prefix is None:
new_style = style_suffix
else:
new_style = style_prefix + "_" + style_suffix
return renpy.style.get_style(new_style) # @UndefinedVariable
def prefixed_style(style_suffix):
"""
Combines the default style prefix with a style suffix.
"""
return combine_style(stack[-1].style_prefix, style_suffix)
# The screen we're using as we add widgets. None if there isn't a
# screen.
screen = None
class Wrapper(renpy.object.Object):
def __reduce__(self):
return self.name
def __init__(self, function, one=False, many=False, imagemap=False, replaces=False, style=None, **kwargs):
# The name assigned to this wrapper. This is used to serialize us correctly.
self.name = None
# The function to call.
self.function = function
# Should we add one or many things to this wrapper?
self.one = one
self.many = many or imagemap
self.imagemap = imagemap
# Should the function be given the replaces parameter,
# specifying the displayable it replaced?
self.replaces = replaces
# Default keyword arguments to the function.
self.kwargs = kwargs
# Default style (suffix).
self.style = style
def __call__(self, *args, **kwargs):
global add_tag
if not stack:
raise Exception("Can't add displayable during init phase.")
# Pull out the special kwargs, widget_id, at, and style_prefix.
widget_id = kwargs.pop("id", None) # @ReservedAssignment
at_list = kwargs.pop("at", [ ])
if not isinstance(at_list, (list, tuple)):
at_list = [ at_list ]
style_prefix = stack[-1].style_prefix
if "style_group" in kwargs:
style_prefix = kwargs.pop("style_group")
if "style_prefix" in kwargs:
style_prefix = kwargs.pop("style_prefix")
# Figure out the keyword arguments, based on the parameters.
if self.kwargs:
keyword = self.kwargs.copy()
keyword.update(kwargs)
else:
keyword = kwargs
# Should we transfer data from an old version of this screen?
old_transfers = screen and screen.old_transfers
# Should we add?
do_add = True
if screen:
if widget_id in screen.widget_properties:
keyword.update(screen.widget_properties[widget_id])
if widget_id in screen.hidden_widgets:
do_add = False
if old_transfers:
old_main = screen.old_widgets.get(widget_id, None)
if self.replaces and old_main is not None:
keyword["replaces"] = old_main
else:
old_main = None
style_suffix = keyword.pop("style_suffix", None) or self.style
if style_suffix and ("style" not in keyword):
keyword["style"] = combine_style(style_prefix, style_suffix)
try:
w = self.function(*args, **keyword)
except TypeError as e:
etype, e, tb = sys.exc_info(); etype
if tb.tb_next is None:
e.args = (e.args[0].replace("__call__", "ui." + self.name), )
del tb # Important! Prevents memory leaks via our frame.
raise
main = w._main or w
# Migrate the focus.
if (old_main is not None) and (not screen.hiding):
renpy.display.focus.replaced_by[id(old_main)] = main
# Wrap the displayable based on the at_list and at_stack.
atw = w
while at_stack:
at_list.append(at_stack.pop())
for atf in at_list:
if isinstance(atf, renpy.display.motion.Transform):
atw = atf(child=atw)
else:
atw = atf(atw)
# Add to the displayable at the bottom of the stack.
if do_add:
stack[-1].add(atw, add_tag)
# Update the stack, as necessary.
if self.one:
stack.append(One(w, style_prefix))
elif self.many:
stack.append(Many(w, self.imagemap, style_prefix))
# If we have an widget_id, record the displayable, the transform,
# and maybe take the state from a previous transform.
if screen and widget_id is not None:
screen.widgets[widget_id] = main
if isinstance(atw, renpy.display.motion.Transform):
screen.transforms[widget_id] = atw
if old_transfers:
oldt = screen.old_transforms.get(widget_id, None)
else:
oldt = None
atw.take_state(oldt)
atw.take_execution_state(oldt)
# Clear out the add_tag.
add_tag = None
return main
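# Usage note added for clarity (not part of the original source): a "many"
# wrapper pushes an Addable onto `stack`, so callers pair it with ui.close():
#
#     ui.vbox()           # Many addable pushed onto the stack
#     ui.text("hello")    # added to the open vbox
#     ui.close()          # pops the vbox again
#
# ui.vbox and ui.text are defined later in this file; ui.close is defined above.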
##############################################################################
# Widget functions.
def _add(d, **kwargs):
d = renpy.easy.displayable(d)
if d._duplicatable:
d = d._duplicate(None)
d._unique()
rv = d
if kwargs:
rv = renpy.display.motion.Transform(child=d, **kwargs)
return rv
add = Wrapper(_add)
def _implicit_add(d):
"""
A faster version of add to use when we know `d` is a displayable and isn't
transformed.
"""
return d
implicit_add = Wrapper(_implicit_add)
def _image(im, **properties):
d = renpy.display.im.image(im, loose=True, **properties)
if d._duplicatable:
d = d._duplicate(None)
d._unique()
return d
image = Wrapper(_image)
null = Wrapper(renpy.display.layout.Null)
text = Wrapper(renpy.text.text.Text, style="text", replaces=True)
hbox = Wrapper(renpy.display.layout.MultiBox, layout="horizontal", style="hbox", many=True)
vbox = Wrapper(renpy.display.layout.MultiBox, layout="vertical", style="vbox", many=True)
fixed = Wrapper(renpy.display.layout.MultiBox, layout="fixed", style="fixed", many=True)
default_fixed = Wrapper(renpy.display.layout.MultiBox, layout="fixed", many=True)
grid = Wrapper(renpy.display.layout.Grid, style="grid", many=True)
side = Wrapper(renpy.display.layout.Side, style="side", many=True)
def _sizer(maxwidth=None, maxheight=None, **properties):
return renpy.display.layout.Container(xmaximum=maxwidth, ymaximum=maxheight, **properties)
sizer = Wrapper(_sizer, one=True)
window = Wrapper(renpy.display.layout.Window, style="window", one=True, child=None)
frame = Wrapper(renpy.display.layout.Window, style="frame", one=True, child=None)
keymap = Wrapper(renpy.display.behavior.Keymap)
saybehavior = Wrapper(renpy.display.behavior.SayBehavior)
pausebehavior = Wrapper(renpy.display.behavior.PauseBehavior)
soundstopbehavior = Wrapper(renpy.display.behavior.SoundStopBehavior)
def _key(key, action=None, activate_sound=None):
if action is None:
raise Exception("Action is required in ui.key.")
return renpy.display.behavior.Keymap(activate_sound=activate_sound, **{ key : action})
key = Wrapper(_key)
class ChoiceActionBase(Action):
"""
Base class for choice actions. The choice is identified by a label
and value. The class will automatically determine the rollback state
and supply correct "sensitive" and "selected" information to the
widget.
If a location is supplied, it will check whether the choice was
previously visited and mark it so if it is chosen.
"""
sensitive = True
def __init__(self, label, value, location=None, block_all=None, sensitive=True, args=None, kwargs=None):
self.label = label
self.value = value
self.location = location
self.sensitive = sensitive
if block_all is None:
self.block_all = renpy.config.fix_rollback_without_choice
else:
self.block_all = block_all
self.chosen = None
if self.location:
self.chosen = renpy.game.persistent._chosen # @UndefinedVariable
if self.chosen is None:
self.chosen = renpy.game.persistent._chosen = { }
# The arguments passed to a menu choice.
self.args = args
self.kwargs = kwargs
def get_sensitive(self):
return (self.sensitive and
not renpy.exports.in_fixed_rollback() or (not self.block_all and self.get_selected()))
def get_selected(self):
roll_forward = renpy.exports.roll_forward_info()
return renpy.exports.in_fixed_rollback() and roll_forward == self.value
def get_chosen(self):
if self.chosen is None:
return False
return (self.location, self.label) in self.chosen
class ChoiceReturn(ChoiceActionBase):
"""
:doc: blockrollback
A menu choice action that returns `value`, while managing the button
state in a manner consistent with fixed rollback. (See block_all for
a description of the behavior.)
`label`
The label text of the button. For imagebuttons and hotspots this
can be anything. This label is used as a unique identifier of
the options within the current screen. Together with `location`
it is used to store whether this option has been chosen.
`value`
The value that is returned when the choice is chosen.
`location`
A unique location identifier for the current choices screen.
`block_all`
If false, the button is given the selected role if it was
the chosen choice, and insensitive if it was not selected.
If true, the button is always insensitive during fixed
rollback.
If None, the value is taken from the :var:`config.fix_rollback_without_choice`
variable.
When true is given to all items in a screen, the screen will
become unclickable (rolling forward will still work). This can
be changed by calling :func:`ui.saybehavior` before the call
to :func:`ui.interact`.
"""
def __call__(self):
if self.chosen is not None:
self.chosen[(self.location, self.label)] = True
return self.value
class ChoiceJump(ChoiceActionBase):
"""
:doc: blockrollback
A menu choice action that jumps to the label `value`, while managing the button
state in a manner consistent with fixed rollback. (See block_all for
a description of the behavior.)
`label`
The label text of the button. For imagebuttons and hotspots this
can be anything. This label is used as a unique identifier of
the options within the current screen. Together with `location`
it is used to store whether this option has been chosen.
`value`
The location to jump to.
`location`
A unique location identifier for the current choices screen.
`block_all`
If false, the button is given the selected role if it was
the chosen choice, and insensitive if it was not selected.
If true, the button is always insensitive during fixed
rollback.
If None, the value is taken from the :var:`config.fix_rollback_without_choice`
variable.
When true is given to all items in a screen, the screen will
become unclickable (rolling forward will still work). This can
be changed by calling :func:`ui.saybehavior` before the call
to :func:`ui.interact`.
"""
def get_selected(self):
roll_forward = renpy.exports.roll_forward_info()
# renpy.exports.call_screen creates a checkpoint with the jump exception
if isinstance(roll_forward, renpy.game.JumpException):
roll_forward = roll_forward.args[0]
return renpy.exports.in_fixed_rollback() and roll_forward == self.value
def __call__(self):
if self.chosen is not None:
self.chosen[(self.location, self.label)] = True
renpy.exports.jump(self.value)
def menu(menuitems,
style='menu',
caption_style='menu_caption',
choice_style='menu_choice',
choice_chosen_style='menu_choice_chosen',
choice_button_style='menu_choice_button',
choice_chosen_button_style='menu_choice_chosen_button',
location=None,
focus=None,
default=False,
**properties):
# menu is now a conglomeration of other widgets. And bully for it.
renpy.ui.vbox(style=style, **properties)
for label, val in menuitems:
if val is None:
renpy.ui.text(label, style=caption_style)
else:
text = choice_style
button = choice_button_style
clicked = ChoiceReturn(label, val, location)
if clicked.get_chosen():
text = choice_chosen_style
button = choice_chosen_button_style
if isinstance(button, basestring):
button = getattr(renpy.game.style, button)
if isinstance(text, basestring):
text = getattr(renpy.game.style, text)
button = button[label]
text = text[label]
renpy.ui.textbutton(label,
style=button,
text_style=text,
clicked=clicked,
focus=focus,
default=default)
close()
input = Wrapper(renpy.display.behavior.Input, exclude='{}', style="input", replaces=True) # @ReservedAssignment
def imagemap_compat(ground,
selected,
hotspots,
unselected=None,
style='imagemap',
button_style='hotspot',
**properties):
if isinstance(button_style, basestring):
button_style = getattr(renpy.game.style, button_style)
fixed(style=style, **properties)
if unselected is None:
unselected = ground
add(ground)
for x0, y0, x1, y1, result in hotspots:
if result is None:
continue
action = ChoiceReturn(result, result)
selected_img = renpy.display.layout.LiveCrop((x0, y0, x1 - x0, y1 - y0), selected)
imagebutton(renpy.display.layout.LiveCrop((x0, y0, x1 - x0, y1 - y0), unselected),
selected_img,
selected_idle_image=selected_img,
selected_insensitive_image=selected_img,
clicked=action,
style=button_style[result],
xpos=x0,
xanchor=0,
ypos=y0,
yanchor=0,
focus_mask=True,
)
close()
button = Wrapper(renpy.display.behavior.Button, style='button', one=True)
def _imagebutton(idle_image=None,
hover_image=None,
insensitive_image=None,
activate_image=None,
selected_idle_image=None,
selected_hover_image=None,
selected_insensitive_image=None,
selected_activate_image=None,
idle=None,
hover=None,
insensitive=None,
selected_idle=None,
selected_hover=None,
selected_insensitive=None,
image_style=None,
auto=None,
**properties):
def choice(a, b, name, required=False):
if a:
return a
if b:
return b
if auto is not None:
rv = renpy.config.imagemap_auto_function(auto, name)
if rv is not None:
return rv
if required:
if auto:
raise Exception("Imagebutton does not have a %s image. (auto=%r)." % (name, auto))
else:
raise Exception("Imagebutton does not have a %s image." % (name, ))
return None
idle = choice(idle, idle_image, "idle", required=True)
hover = choice(hover, hover_image, "hover")
insensitive = choice(insensitive, insensitive_image, "insensitive")
selected_idle = choice(selected_idle, selected_idle_image, "selected_idle")
selected_hover = choice(selected_hover, selected_hover_image, "selected_hover")
selected_insensitive = choice(selected_insensitive, selected_insensitive_image, "selected_insensitive")
return renpy.display.behavior.ImageButton(
idle,
hover,
insensitive_image=insensitive,
activate_image=activate_image,
selected_idle_image=selected_idle,
selected_hover_image=selected_hover,
selected_insensitive_image=selected_insensitive,
selected_activate_image=selected_activate_image,
**properties)
imagebutton = Wrapper(_imagebutton, style="image_button")
def _textbutton(label, clicked=None, style=None, text_style=None, substitute=True, scope=None, **kwargs):
text_kwargs, button_kwargs = renpy.easy.split_properties(kwargs, "text_", "")
# Deal with potentially bad keyword arguments. (We'd get these if the user
# writes text_align instead of text_text_align.)
if "align" in text_kwargs:
if isinstance(text_kwargs["align"], float):
text_kwargs.pop("align")
text_kwargs.pop("y_fudge", None)
if style is None:
style = prefixed_style("button")
if text_style is None:
text_style = renpy.style.get_text_style(style, prefixed_style('button_text')) # @UndefinedVariable
rv = renpy.display.behavior.Button(style=style, clicked=clicked, **button_kwargs)
text = renpy.text.text.Text(label, style=text_style, substitute=substitute, scope=scope, **text_kwargs)
rv.add(text)
rv._main = text
rv._composite_parts = [ text ]
return rv
textbutton = Wrapper(_textbutton)
def _label(label, style=None, text_style=None, substitute=True, scope=None, **kwargs):
text_kwargs, label_kwargs = renpy.easy.split_properties(kwargs, "text_", "")
if style is None:
style = prefixed_style('label')
if text_style is None:
text_style = renpy.style.get_text_style(style, prefixed_style('label_text')) # @UndefinedVariable
rv = renpy.display.layout.Window(None, style=style, **label_kwargs)
text = renpy.text.text.Text(label, style=text_style, substitute=substitute, scope=scope, **text_kwargs)
rv.add(text)
rv._main = text
rv._composite_parts = [ text ]
return rv
label = Wrapper(_label)
adjustment = renpy.display.behavior.Adjustment
def _bar(*args, **properties):
if len(args) == 4:
width, height, range, value = args # @ReservedAssignment
elif len(args) == 2:
range, value = args # @ReservedAssignment
width = None
height = None
else:
range = 1 # @ReservedAssignment
value = 0
width = None
height = None
if "width" in properties:
width = properties.pop("width")
if "height" in properties:
height = properties.pop("height")
if "range" in properties:
range = properties.pop("range") # @ReservedAssignment
if "value" in properties:
value = properties.pop("value")
if "style" not in properties:
if isinstance(value, BarValue):
if properties["vertical"]:
style = value.get_style()[1]
else:
style = value.get_style()[0]
if isinstance(style, basestring):
style = prefixed_style(style)
properties["style"] = style
return renpy.display.behavior.Bar(range, value, width, height, **properties)
bar = Wrapper(_bar, vertical=False, replaces=True)
vbar = Wrapper(_bar, vertical=True, replaces=True)
slider = Wrapper(_bar, style='slider', replaces=True)
vslider = Wrapper(_bar, style='vslider', replaces=True)
scrollbar = Wrapper(_bar, style='scrollbar', replaces=True)
vscrollbar = Wrapper(_bar, style='vscrollbar', replaces=True)
def _autobar_interpolate(range, start, end, time, st, at, **properties): # @ReservedAssignment
if st > time:
t = 1.0
redraw = None
else:
t = st / time
redraw = 0
value = start + t * (end - start)
return renpy.display.behavior.Bar(range, value, None, None, **properties), redraw
autobar_interpolate = renpy.curry.curry(_autobar_interpolate)
def _autobar(range, start, end, time, **properties): # @ReservedAssignment
return renpy.display.layout.DynamicDisplayable(autobar_interpolate(range, start, end, time, **properties))
autobar = Wrapper(_autobar)
transform = Wrapper(renpy.display.motion.Transform, one=True, style='transform')
_viewport = Wrapper(renpy.display.viewport.Viewport, one=True, replaces=True, style='viewport')
_vpgrid = Wrapper(renpy.display.viewport.VPGrid, many=True, replaces=True, style='vpgrid')
VIEWPORT_SIZE = 32767
def viewport_common(vpfunc, _spacing_to_side, scrollbars=None, **properties):
if scrollbars is None:
return vpfunc(**properties)
(vscrollbar_properties, scrollbar_properties, side_properties, viewport_properties, core_properties) = \
renpy.easy.split_properties(properties, "vscrollbar_", "scrollbar_", "side_", "viewport_", "")
if renpy.config.position_viewport_side:
from renpy.sl2.slproperties import position_property_names
for k, v in core_properties.items():
if k in position_property_names:
side_properties[k] = v
elif _spacing_to_side and (k == "spacing"):
side_properties[k] = v
else:
viewport_properties[k] = v
else:
viewport_properties.update(core_properties)
if renpy.config.prefix_viewport_scrollbar_styles and (scrollbars != "vertical"):
scrollbar_properties.setdefault("style", prefixed_style("scrollbar"))
else:
scrollbar_properties.setdefault("style", "scrollbar")
if renpy.config.prefix_viewport_scrollbar_styles and (scrollbars != "horizontal"):
vscrollbar_properties.setdefault("style", prefixed_style("vscrollbar"))
else:
vscrollbar_properties.setdefault("style", "vscrollbar")
alt = viewport_properties.get("alt", "viewport")
scrollbar_properties.setdefault("alt", renpy.minstore.__(alt) + " " + renpy.minstore.__("horizontal scroll"))
vscrollbar_properties.setdefault("alt", renpy.minstore.__(alt) + " " + renpy.minstore.__("vertical scroll"))
if scrollbars == "vertical":
if renpy.config.scrollbar_child_size:
viewport_properties.setdefault("child_size", (None, VIEWPORT_SIZE))
side("c r", **side_properties)
rv = vpfunc(**viewport_properties)
addable = stack.pop()
vscrollbar(adjustment=rv.yadjustment, **vscrollbar_properties)
close()
stack.append(addable)
return rv
elif scrollbars == "horizontal":
if renpy.config.scrollbar_child_size:
viewport_properties.setdefault("child_size", (VIEWPORT_SIZE, None))
side("c b", **side_properties)
rv = vpfunc(**viewport_properties)
addable = stack.pop()
scrollbar(adjustment=rv.xadjustment, **scrollbar_properties)
close()
stack.append(addable)
return rv
else:
if renpy.config.scrollbar_child_size:
viewport_properties.setdefault("child_size", (VIEWPORT_SIZE, VIEWPORT_SIZE))
side("c r b", **side_properties)
rv = vpfunc(**viewport_properties)
addable = stack.pop()
vscrollbar(adjustment=rv.yadjustment, **vscrollbar_properties)
scrollbar(adjustment=rv.xadjustment, **scrollbar_properties)
close()
stack.append(addable)
return rv
def viewport(**properties):
return viewport_common(_viewport, True, **properties)
def vpgrid(**properties):
return viewport_common(_vpgrid, False, **properties)
conditional = Wrapper(renpy.display.behavior.Conditional, one=True)
timer = Wrapper(renpy.display.behavior.Timer, replaces=True)
drag = Wrapper(renpy.display.dragdrop.Drag, replaces=True, one=True)
draggroup = Wrapper(renpy.display.dragdrop.DragGroup, replaces=True, many=True)
mousearea = Wrapper(renpy.display.behavior.MouseArea, replaces=True)
##############################################################################
# New-style imagemap related functions.
class Imagemap(object):
"""
Stores information about the images used by an imagemap.
"""
alpha = True
cache_param = True
def __init__(self, insensitive, idle, selected_idle, hover, selected_hover, selected_insensitive, alpha, cache):
self.insensitive = renpy.easy.displayable(insensitive)
self.idle = renpy.easy.displayable(idle)
self.selected_idle = renpy.easy.displayable(selected_idle)
self.hover = renpy.easy.displayable(hover)
self.selected_hover = renpy.easy.displayable(selected_hover)
self.selected_insensitive = renpy.easy.displayable(selected_insensitive)
self.alpha = alpha
self.cache_param = cache
self.cache = renpy.display.imagemap.ImageMapCache(cache)
def reuse(self):
self.cache = renpy.display.imagemap.ImageMapCache(self.cache_param)
def _imagemap(ground=None, hover=None, insensitive=None, idle=None, selected_hover=None, selected_idle=None, selected_insensitive=None, auto=None, alpha=True, cache=True, style='imagemap', **properties):
def pick(variable, name, other):
if variable:
return variable
if auto:
for i in name:
fn = renpy.config.imagemap_auto_function(auto, i)
if fn is not None:
return fn
if other is not None:
return other
raise Exception("Could not find a %s image for imagemap." % name[0])
ground = pick(ground, ( "ground", "idle" ), idle)
idle = pick(idle, ( "idle", ), ground)
selected_idle = pick(selected_idle, ( "selected_idle", ), idle)
hover = pick(hover, ( "hover", ), ground)
selected_hover = pick(selected_hover, ( "selected_hover", ), hover)
insensitive = pick(insensitive, ("insensitive", ), ground)
selected_insensitive = pick(selected_insensitive, ("selected_insensitive", ), hover)
imagemap_stack.append(
Imagemap(
insensitive,
idle,
selected_idle,
hover,
selected_hover,
selected_insensitive,
alpha,
cache))
properties.setdefault('fit_first', True)
rv = renpy.display.layout.MultiBox(layout='fixed', **properties)
parts = [ ]
if ground:
rv.add(renpy.easy.displayable(ground))
parts.append(ground)
box = renpy.display.layout.MultiBox(layout='fixed')
rv.add(box)
parts.append(box)
rv._main = box
rv._composite_parts = parts
return rv
imagemap = Wrapper(_imagemap, imagemap=True, style='imagemap')
def _hotspot(spot, style='hotspot', **properties):
if not imagemap_stack:
raise Exception("hotspot expects an imagemap to be defined.")
imagemap = imagemap_stack[-1]
x, y, w, h = spot
idle = imagemap.idle
hover = imagemap.hover
selected_idle = imagemap.selected_idle
selected_hover = imagemap.selected_hover
insensitive = imagemap.insensitive
selected_insensitive = imagemap.selected_insensitive
idle = imagemap.cache.crop(idle, spot)
hover = imagemap.cache.crop(hover, spot)
selected_idle = imagemap.cache.crop(selected_idle, spot)
selected_hover = imagemap.cache.crop(selected_hover, spot)
insensitive = imagemap.cache.crop(insensitive, spot)
selected_insensitive = imagemap.cache.crop(selected_insensitive, spot)
properties.setdefault("xpos", x)
properties.setdefault("xanchor", 0)
properties.setdefault("ypos", y)
properties.setdefault("yanchor", 0)
properties.setdefault("xminimum", w)
properties.setdefault("xmaximum", w)
properties.setdefault("yminimum", h)
properties.setdefault("ymaximum", h)
if imagemap.alpha:
focus_mask = True
else:
focus_mask = None
properties.setdefault("focus_mask", focus_mask)
return renpy.display.behavior.Button(
None,
idle_background=idle,
selected_idle_background=selected_idle,
hover_background=hover,
selected_hover_background=selected_hover,
insensitive_background=insensitive,
selected_insensitive_background=selected_insensitive,
style=style,
**properties)
hotspot_with_child = Wrapper(_hotspot, style="hotspot", one=True)
def hotspot(*args, **kwargs):
hotspot_with_child(*args, **kwargs)
null()
def _hotbar(spot, adjustment=None, range=None, value=None, **properties): # @ReservedAssignment
if (adjustment is None) and (range is None) and (value is None):
raise Exception("hotbar requires either an adjustment or a range and value.")
if not imagemap_stack:
raise Exception("hotbar expects an imagemap to be defined.")
imagemap = imagemap_stack[-1]
x, y, w, h = spot
properties.setdefault("xpos", x)
properties.setdefault("ypos", y)
properties.setdefault("xanchor", 0)
properties.setdefault("yanchor", 0)
fore_bar=imagemap.cache.crop(imagemap.selected_idle, spot)
aft_bar=imagemap.cache.crop(imagemap.idle, spot)
hover_fore_bar=imagemap.cache.crop(imagemap.selected_hover, spot)
hover_aft_bar=imagemap.cache.crop(imagemap.hover, spot)
if h > w:
properties.setdefault("bar_vertical", True)
properties.setdefault("bar_invert", True)
fore_bar, aft_bar = aft_bar, fore_bar
hover_fore_bar, hover_aft_bar = hover_aft_bar, hover_fore_bar
return renpy.display.behavior.Bar(
adjustment=adjustment,
range=range,
value=value,
fore_bar=fore_bar,
aft_bar=aft_bar,
hover_fore_bar=hover_fore_bar,
hover_aft_bar=hover_aft_bar,
fore_gutter=0,
aft_gutter=0,
bar_resizing=False,
thumb=None,
thumb_shadow=None,
thumb_offset=0,
xmaximum=w,
ymaximum=h,
**properties)
hotbar = Wrapper(_hotbar, style="hotbar", replaces=True)
##############################################################################
# Curried functions, for use in clicked, hovered, and unhovered.
def _returns(v):
return v
returns = renpy.curry.curry(_returns)
def _jumps(label, transition=None):
if isinstance(transition, basestring):
transition = getattr(renpy.config, transition)
if transition is not None:
renpy.exports.transition(transition)
raise renpy.exports.jump(label)
jumps = renpy.curry.curry(_jumps)
def _jumpsoutofcontext(label):
raise renpy.game.JumpOutException(label)
jumpsoutofcontext = renpy.curry.curry(_jumpsoutofcontext)
def callsinnewcontext(*args, **kwargs):
return renpy.exports.curried_call_in_new_context(*args, **kwargs)
def invokesinnewcontext(*args, **kwargs):
return renpy.exports.curried_invoke_in_new_context(*args, **kwargs)
def gamemenus(*args):
if args:
return callsinnewcontext("_game_menu", _game_menu_screen=args[0])
else:
return callsinnewcontext("_game_menu")
##############################################################################
# The on statement.
on = Wrapper(renpy.display.behavior.OnEvent)
##############################################################################
# A utility function so CDD components can be given an id.
def screen_id(id_, d):
"""
:doc: ui
Assigns the displayable `d` the screen widget id `id_`, as if it had
been created by a screen statement with that id.
"""
if screen is None:
raise Exception("ui.screen_id must be called from within a screen.")
screen.widget_id[id_] = d
##############################################################################
# Postamble
# Update the wrappers to have names.
k, v = None, None
for k, v in globals().iteritems():
if isinstance(v, Wrapper):
v.name = k
```
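The Addable/Wrapper machinery above is, at heart, a stack-based widget builder: "many" containers push themselves onto a stack, children are added to whatever is on top, and `ui.close()` pops. A Ren'Py-free sketch of the same pattern (all names below are invented for illustration):

```python
class Box(object):
    def __init__(self):
        self.children = []

    def add(self, child):
        self.children.append(child)


stack = [Box()]              # the root container


def open_box():
    box = Box()
    stack[-1].add(box)       # add to the current container...
    stack.append(box)        # ...then make it current (like ui.vbox())
    return box


def add_leaf(value):
    stack[-1].add(value)     # like ui.text("...")


def close():
    stack.pop()              # like ui.close()


open_box()
add_leaf("hello")
add_leaf("world")
close()
print(stack[0].children[0].children)   # ['hello', 'world']
```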
#### File: Builds/T PC Windows Version/T.py
```python
from __future__ import print_function
import os
import sys
import warnings
# Functions to be customized by distributors. ################################
# Given the Ren'Py base directory (usually the directory containing
# this file), this is expected to return the path to the common directory.
def path_to_common(renpy_base):
return renpy_base + "/renpy/common"
# Given a directory holding a Ren'Py game, this is expected to return
# the path to a directory that will hold save files.
def path_to_saves(gamedir, save_directory=None):
import renpy # @UnresolvedImport
if save_directory is None:
save_directory = renpy.config.save_directory
save_directory = renpy.exports.fsencode(save_directory)
# Makes sure the permissions are right on the save directory.
def test_writable(d):
try:
fn = os.path.join(d, "test.txt")
open(fn, "w").close()
open(fn, "r").close()
os.unlink(fn)
return True
except:
return False
# Android.
if renpy.android:
paths = [
os.path.join(os.environ["ANDROID_OLD_PUBLIC"], "game/saves"),
os.path.join(os.environ["ANDROID_PRIVATE"], "saves"),
os.path.join(os.environ["ANDROID_PUBLIC"], "saves"),
]
for rv in paths:
if os.path.isdir(rv) and test_writable(rv):
break
print("Saving to", rv)
# We return the last path as the default.
return rv
if renpy.ios:
from pyobjus import autoclass
from pyobjus.objc_py_types import enum
NSSearchPathDirectory = enum("NSSearchPathDirectory", NSDocumentDirectory=9)
NSSearchPathDomainMask = enum("NSSearchPathDomainMask", NSUserDomainMask=1)
NSFileManager = autoclass('NSFileManager')
manager = NSFileManager.defaultManager()
url = manager.URLsForDirectory_inDomains_(
NSSearchPathDirectory.NSDocumentDirectory,
NSSearchPathDomainMask.NSUserDomainMask,
).lastObject()
# url.path seems to change type based on iOS version, for some reason.
try:
rv = url.path().UTF8String().decode("utf-8")
except:
rv = url.path.UTF8String().decode("utf-8")
print("Saving to", rv)
return rv
# No save directory given.
if not save_directory:
return gamedir + "/saves"
# Search the path above Ren'Py for a directory named "Ren'Py Data".
# If it exists, then use that for our save directory.
path = renpy.config.renpy_base
while True:
if os.path.isdir(path + "/Ren'Py Data"):
return path + "/Ren'Py Data/" + save_directory
newpath = os.path.dirname(path)
if path == newpath:
break
path = newpath
# Otherwise, put the saves in a platform-specific location.
if renpy.macintosh:
rv = "~/Library/RenPy/" + save_directory
return os.path.expanduser(rv)
elif renpy.windows:
if 'APPDATA' in os.environ:
return os.environ['APPDATA'] + "/RenPy/" + save_directory
else:
rv = "~/RenPy/" + renpy.config.save_directory
return os.path.expanduser(rv)
else:
rv = "~/.renpy/" + save_directory
return os.path.expanduser(rv)
# Returns the path to the Ren'Py base directory (containing common and
# the launcher, usually.)
def path_to_renpy_base():
renpy_base = os.path.dirname(os.path.realpath(sys.argv[0]))
renpy_base = os.path.abspath(renpy_base)
return renpy_base
##############################################################################
# Doing the version check this way also doubles as an import of ast,
# which helps py2exe et al.
try:
import ast; ast
except:
    print("Ren'Py requires at least python 2.6.")
sys.exit(0)
android = ("ANDROID_PRIVATE" in os.environ)
# Android requires us to add code to the main module, and to command some
# renderers.
if android:
__main__ = sys.modules["__main__"]
__main__.path_to_renpy_base = path_to_renpy_base
__main__.path_to_common = path_to_common
__main__.path_to_saves = path_to_saves
os.environ["RENPY_RENDERER"] = "gl"
def main():
renpy_base = path_to_renpy_base()
# Add paths.
if os.path.exists(renpy_base + "/module"):
sys.path.append(renpy_base + "/module")
sys.path.append(renpy_base)
# This is looked for by the mac launcher.
if os.path.exists(renpy_base + "/renpy.zip"):
sys.path.append(renpy_base + "/renpy.zip")
# Ignore warnings that happen.
warnings.simplefilter("ignore", DeprecationWarning)
# Start Ren'Py proper.
try:
import renpy.bootstrap
except ImportError:
print("Could not import renpy.bootstrap. Please ensure you decompressed Ren'Py", file=sys.stderr)
print("correctly, preserving the directory structure.", file=sys.stderr)
raise
renpy.bootstrap.bootstrap(renpy_base)
if __name__ == "__main__":
main()
``` |
{
"source": "jmanday/Master",
"score": 2
} |
#### File: Practicas/Practica2/keras5.py
```python
import numpy as np
import os
import cv2
import sys
import glob
import pandas as pd
from PIL import Image
from keras.datasets import cifar10
from multiprocessing import Pool, cpu_count, freeze_support
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.utils import np_utils
from keras import backend as K
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from checkScore2 import getScore
SEED = 14
NUM_EPOCHS = 3
BATCH_SIZE = 96
SIZE = 64
NUM_CLASSES = 3
SOLUTION_SGT1 = "solution_stg1_release.csv"
# get the size of image
def im_multi(path):
try:
im_stats_im_ = Image.open(path)
return [path, {'size': im_stats_im_.size}]
except:
print(path)
return [path, {'size': [0, 0]}]
# add a new field (size) to dataset
def im_stats(im_stats_df):
im_stats_d = {}
p = Pool(cpu_count())
ret = p.map(im_multi, im_stats_df['path'])
for i in range(len(ret)):
im_stats_d[ret[i][0]] = ret[i][1]
im_stats_df['size'] = im_stats_df['path'].map(lambda x: ' '.join(str(s) for s in im_stats_d[x]['size']))
return im_stats_df
# read each image with cv2 and resize it
def get_im_cv2(path):
img = cv2.imread(path)
resized = cv2.resize(img, (SIZE, SIZE), cv2.INTER_LINEAR)
return [path, resized]
def normalize_image_features(paths):
imf_d = {}
p = Pool(cpu_count())
ret = p.map(get_im_cv2, paths)
for i in range(len(ret)):
imf_d[ret[i][0]] = ret[i][1]
ret = []
fdata = [imf_d[f] for f in paths]
fdata = np.array(fdata, dtype=np.uint8)
fdata = fdata.astype('float32')
fdata = fdata / 255
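    # (For N input paths this yields a float32 array of shape (N, SIZE, SIZE, 3)
    # with pixel values scaled into [0, 1].)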
return fdata
# create file "train.npy" with the data of the normalized training images
def load_data_train():
train = glob.glob('./input2/train/**/*.jpg')
#train_additional = glob.glob('./input/additional/**/*.jpg')
#train = train + train_additional
train = pd.DataFrame([[p.split('/')[3],p.split('/')[4],p] for p in train], columns = ['type','image','path'])
train = im_stats(train)
train = train[train['size'] != '0 0'].reset_index(drop=True) #remove bad images
train_data = normalize_image_features(train['path'])
np.save('train.npy', train_data, allow_pickle=True, fix_imports=True)
return train
# create file "test.npy" with the data of the normalized test images
def load_data_test():
test = glob.glob('./input2/test/*.jpg')
test = pd.DataFrame([[p.split('/')[3],p] for p in test], columns = ['image','path']) #[::20] #limit for Kaggle Demo
test_data = normalize_image_features(test['path'])
np.save('test.npy', test_data, allow_pickle=True, fix_imports=True)
return test
# create the CNN model
def create_model(opt_='adamax'):
if K.image_data_format() == 'channels_first':
input_shape_aux = (3, SIZE, SIZE)
else:
input_shape_aux = (SIZE, SIZE, 3)
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=input_shape_aux, padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(16, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (3, 3), input_shape=input_shape_aux, padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(16, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))
model.compile(optimizer=opt_, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
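    # Shape sketch assuming SIZE = 64: 64x64x3 -> two conv layers -> pool ->
    # 32x32x16 -> two conv layers -> pool -> 16x16x16 -> flatten (4096) ->
    # dense 512 -> softmax over NUM_CLASSES outputs.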
return model
# create file "train_target.npy" with the encoded class labels
def get_class(train):
le = LabelEncoder()
train_target = le.fit_transform(train['type'].values)
#print(le.classes_) #in case not 1 to 3 order
np.save('train_target.npy', train_target, allow_pickle=True, fix_imports=True)
#return train_target
if __name__ == "__main__":
name_file_csv = sys.argv[1]
print("\n- Load datas ...")
test_data = load_data_test() # get dataset with values of test images
train = load_data_train() # get dataset with values of train images
get_class(train) # get the class
test_id = test_data.image.values
np.save('test_id.npy', test_id, allow_pickle=True, fix_imports=True)
train_data = np.load('train.npy') # load data train images
test_data = np.load('test.npy')
train_target = np.load('train_target.npy')
print('Generating validation data...')
x_train, x_val_train, y_train, y_val_train = train_test_split(train_data, train_target, test_size=0, random_state=SEED)
print('Data augmentation...')
datagen = ImageDataGenerator(rotation_range=0.3, zoom_range=0.3)
datagen.fit(train_data)
print('Training model...')
model = create_model()
print(model.summary())
model.fit_generator(generator=datagen.flow(x_train, y_train, batch_size=BATCH_SIZE, shuffle=True),
validation_data=(x_val_train, y_val_train),
epochs=NUM_EPOCHS, steps_per_epoch=len(x_train))
print('Predicting...')
pred = model.predict_proba(test_data)
print('Exporting to CSV...')
df = pd.DataFrame(pred, columns=['Type_1', 'Type_2', 'Type_3'])
df['image_name'] = test_id
df.to_csv(name_file_csv, index=False)
getScore(name_file_csv)
```
#### File: restaurantes/appRestaurants/views.py
```python
from django.shortcuts import render, HttpResponse
from django.http import HttpResponseRedirect
from django.core.files.base import ContentFile
import requests
import json
from appRestaurants.models import restaurantes
from appRestaurants.models import address
from appRestaurants.forms import AddRestaurant
import gridfs
import base64
# Create your views here.
def index(request):
    context = {} # variables for the template go here
return render(request,'app/login.html', context)
def home(request):
context = {}
return render(request,'app/home.html', context)
def addRestaurant(request):
if request.method == 'POST':
form = AddRestaurant(request.POST, request.FILES)
if form.is_valid():
form_id = form.cleaned_data['id_restaurant']
form_name = form.cleaned_data['name']
form_street = form.cleaned_data['direc']
form_zipcode = form.cleaned_data['zipcode']
form_city = form.cleaned_data['city']
form_image = request.FILES['image']
instanceAddress = address(city = form_city, street = form_street, zipcode = form_zipcode, coord = [0, 0])
instanceRestaurant = restaurantes(name = form_name, id_restaurant = form_id, image = form_image, address = instanceAddress)
instanceRestaurant.save()
return HttpResponseRedirect('/restaurantes/home')
else:
form = AddRestaurant()
return render(request, 'app/addRestaurant.html', {'form': form})
def searchRestaurant(request):
if request.method == 'POST':
context = {}
idRestaurant = request.POST.get('searched_id')
rest = restaurantes.objects(id_restaurant = idRestaurant)
for d in rest:
context["name"] = d.name
context["id_restaurant"] = d.id_restaurant
context["street"] = d.address.street
context["city"] = d.address.city
context["zipcode"] = d.address.zipcode
context["image"] = d.image
if (d.image):
context["image"] = base64.b64encode(d.image.read())
return render(request,'app/getRestaurant.html', {'data': context})
else:
return HttpResponseRedirect('/restaurantes/searchRestaurant/')
def listRestaurants(request):
    context = {
        "rests": restaurantes.objects, # all the restaurants
}
return render(request, 'app/listRestaurants.html', {'data': context['rests']})
#def getRestaurant(request):
#listRests = []
#auxRest = {}
    #rests = restaurantes.objects, # all the restaurants
#for r in rests:
# print (len(r))
# for i in r:
# auxRest["name"] = i.name
# auxRest["id_restaurant"] = i.id_restaurant
# auxRest["street"] = i.address.street
# auxRest["city"] = i.address.city
# auxRest["zipcode"] = i.address.zipcode
# auxRest["image"] = i.image
# if (i.image):
# auxRest["image"] = base64.b64encode(i.image.read())
# listRests.append(auxRest)
# auxRest = {}
#return render(request, 'app/listRestaurants.html', {'data': listRests})
def profile(request):
parsedData = []
if request.method == 'POST':
username = request.POST.get('user')
req = requests.get('https://api.github.com/users/' + username)
jsonList = []
jsonList.append(json.loads(req.text))
userData = {}
for data in jsonList:
userData['name'] = data['name']
userData['blog'] = data['blog']
userData['email'] = data['email']
userData['public_gists'] = data['public_gists']
userData['public_repos'] = data['public_repos']
userData['avatar_url'] = data['avatar_url']
userData['followers'] = data['followers']
userData['following'] = data['following']
parsedData.append(userData)
return render(request, 'app/profile.html', {'data': parsedData})
```
#### File: restaurantes/restaurantes/urls.py
```python
from django.conf.urls import include, url
from django.contrib import admin
from registration.backends.simple.views import RegistrationView
# Create a new class that redirects the user to the index page, if successful at logging
class MyRegistrationView(RegistrationView):
def get_success_url(self,request, user):
return '/restaurantes/home'
urlpatterns = [
url(r'^restaurantes/', include('appRestaurants.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('registration.backends.simple.urls')),
]
```
#### File: TFM/scripts/calculateSuccess.py
```python
import os
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import csv
PATH_SCRIPTS = "/Users/jesusgarciamanday/Documents/Master/TFM/scripts/"
class DataMatching:
def __init__(self, imageClassifier, imageMatching, value):
self.imageClassifier = imageClassifier
self.imageMatching = imageMatching
self.value = value
def upperMatching(fileName):
count = 0
success = 0
res = fileName.split("/")
name = res[len(res)-1]
detector = name.split("-")[len(name.split("-")) - 1]
detector = detector.split(".")[0]
descriptor = name.split("-")[len(name.split("-")) - 2]
print("\nDetector: ", detector, " ", "Descriptor: ", descriptor)
with open(fileName, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
            if (count != 0):  # skip the header row
                if (row[0] == row[1]):
                    success += 1
            count += 1
    # count includes the header row, so exclude it from the denominator
    result = (success / (count - 1)) * 100 if count > 1 else 0
    print("Precision: ", result, "%\n")
if __name__ == "__main__":
fileName = sys.argv[1]
upperMatching (fileName)
```
#### File: TFM/scripts/gauss.py
```python
import numpy as np
import cv2
import os
import commands
import sys
PATH_INPUT_IRIS = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/CASIA_V4/"
PATH_OUTPUTS_IRIS_GAUSS = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/gauss_filter/15/"
PATH_OUTPUTS_IRIS_SEGMENTATION_GAUSS = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/improved_iris(clahe)/"
PATH_OUTPUTS_FEATURE_EXTRACTION = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/gauss_filter_iris_segmentation/"
METHODS_SEGMENTATION = ['caht', 'wahet']
# method to apply a gaussian filter to the images
def gauss_filter(iris_CASIAV4):
for path_img in iris_CASIAV4:
img = cv2.imread(path_img,0)
# apply Gaussian filter (Arguments are optional).
blur = cv2.GaussianBlur(img,(5,5),15,15)
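        # Note (not a change): cv2.GaussianBlur's positional signature is
        # (src, ksize, sigmaX[, dst[, sigmaY]]), so the fourth positional 15
        # lands on dst; sigmaY is normally passed by keyword (sigmaY=15).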
vaux = path_img.split('/')
name = vaux[len(vaux)-1].split('-')[0]
name = name.split('.')[0]
print PATH_OUTPUTS_IRIS_GAUSS + name + '-gauss.png'
        cv2.imwrite(PATH_OUTPUTS_IRIS_GAUSS + name + '-gauss.png',blur)
# method to apply a gaussian filter to the images from segmentation's iris
def gauss_filter_iris_segmentation(iris_segmentation):
for key, value in iris_segmentation.iteritems():
for path_img in value:
img = cv2.imread(path_img,0)
# apply Gaussian filter (Arguments are optional).
blur = cv2.GaussianBlur(img,(5,5),0)
vaux = path_img.split('/')
name = vaux[len(vaux)-1].split('-')[0]
print PATH_OUTPUTS_FEATURE_EXTRACTION + key + '/' + name + '-gauss.png'
cv2.imwrite(PATH_OUTPUTS_FEATURE_EXTRACTION + key + '/' + name + '-gauss.png',blur)
# method to obtein the textures from iris after that method CLAHE has been applied
def getIrisCLAHE():
vnames_images = []
images_seg = {}
for type_seg in METHODS_SEGMENTATION:
        # PATH_OUTPUTS_IRIS_CLAHE is not defined in this script; use the CLAHE
        # output directory declared above instead.
        direc = os.chdir(PATH_OUTPUTS_IRIS_SEGMENTATION_GAUSS + "/" + type_seg)
cmd = ("ls")
res = commands.getstatusoutput(cmd)
if res[0] == 0:
vnames_images = res[1].split()
aux_name = []
for name_file in vnames_images:
cmd = ("find $PWD -type f -name " + name_file)
res = commands.getstatusoutput(cmd)
if res[0] == 0:
aux_name.append(res[1])
images_seg[type_seg] = aux_name
return images_seg
# method to obtein the textures from iris
def getIrisCASIAV4():
vnames_images = []
direc = os.chdir(PATH_INPUT_IRIS)
cmd = ("ls")
res = commands.getstatusoutput(cmd)
if res[0] == 0:
vnames_images = res[1].split()
aux_name = []
for name_file in vnames_images:
vname_file = name_file.split('.')
if (vname_file[1] != 'txt'):
cmd = ("find $PWD -type f -name " + name_file)
res = commands.getstatusoutput(cmd)
if res[0] == 0:
aux_name.append(res[1])
return aux_name
if __name__ == "__main__":
#iris_image_CLAHE = getIrisCLAHE()
#gauss_filter_iris_segmentation(iris_image_CLAHE)
iris_CASIAV4 = getIrisCASIAV4()
gauss_filter(iris_CASIAV4)
```
#### File: TFM/scripts/matching-BruteForce-ORB.py
```python
import os
import sys
import numpy as np
import cv2
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import csv
PATH_DATABASES_TRAIN_IMAGES = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/train-images4/" # data source of segmented images used as the comparison set
PATH_DATABASES_QUERY_IMAGES = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/query-images4/" # data source of the images to classify
class DataMatching:
def __init__(self, imageSegmented, imageClassifier):
self.imageSegmented = imageSegmented
self.imageClassifier = imageClassifier
def getNameFile(file):
fileName = ""
if (len(file.split("R")) > 1):
fileName = file.split("R")[0]
else:
if (len(file.split("L")) > 1):
fileName = file.split("L")[0]
return fileName
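# e.g. a CASIA-style file name such as "S5000R03.png" would map to "S5000"
# (the subject id before the R/L eye marker); the name is illustrative only.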
def matchingBruteForceORB(filesTrainImages, filesQueryImages):
valuesDataMatching = []
results = []
filesTrainImages.sort()
filesQueryImages.sort()
# Initiate ORB detector
orb = cv2.ORB_create()
for fImgQuery in filesQueryImages:
nMatch = 0
index = 0
firstImage = ""
imgQuery = cv2.imread(PATH_DATABASES_QUERY_IMAGES + fImgQuery,0)
nameImgQuery = getNameFile(fImgQuery)
for fImgTrain in filesTrainImages:
imgSeg = cv2.imread(PATH_DATABASES_TRAIN_IMAGES + fImgTrain,0)
nameImgTrain = getNameFile(fImgTrain)
            # find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(imgQuery,None)
kp2, des2 = orb.detectAndCompute(imgSeg,None)
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2, k=2)
# Apply ratio test
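            # (Lowe's ratio test: a match is kept only when the best distance is
            # clearly smaller than the second-best, filtering ambiguous matches.)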
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
if ((nameImgTrain == firstImage) or (firstImage == "")):
nMatch = nMatch + len(good)
else:
valuesDataMatching.append({"imageQuery": nameImgQuery, "imageTrain": firstImage, "value": nMatch})
nMatch = len(good)
firstImage = nameImgTrain
firstImage = ""
nMatch = 0
valM = max(valuesDataMatching, key=lambda item:item['value'])
print(valM)
results.append(valM)
valuesDataMatching = []
with open('results2-BruteForce-ORB.csv', 'w') as csvfile:
filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
filewriter.writerow(['Image Query', 'Image Train', "Value matching"])
for rs in results:
filewriter.writerow([rs['imageQuery'], rs['imageTrain'], rs['value']])
if __name__ == "__main__":
filesTrainImages = os.listdir(PATH_DATABASES_TRAIN_IMAGES)
filesQueryImages = os.listdir(PATH_DATABASES_QUERY_IMAGES)
matchingBruteForceORB(filesTrainImages, filesQueryImages)
```
#### File: TFM/scripts/matchLowe.py
```python
import os
import commands
import sys
from util import transpose
import math
import numpy as np
PATH_OUTPUTS_FEATURE_EXTRACTION = "/Users/jesusgarciamanday/Documents/Master/TFM/outputs/feature_extraction/Lip-vireo/prueba/"
PATH_DATABASE_CASIAV4 = "/Users/jesusgarciamanday/Documents/Master/TFM/databases/CASIA_V4/"
class Imagen:
quadrants = []
features = []
points = []
def __init__(self, features, points):
self.features = features
self.points = points
# method that returns the names of the folders and files
# found in the path given as a parameter
def obteinFolders(path):
direc = os.chdir(path)
cmd = ("ls")
res = commands.getstatusoutput(cmd)
if res[0] == 0:
names = res[1].split()
return names
# method that reads the image descriptor files and stores them in a general structure
# separated by detector, i.e. there are 3 groups of images where each image stores
# the values of its descriptors in a matrix (128x128) (points x features per point)
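# (As parsed below, each Lip-vireo keypoint record appears to span 12 lines:
# one line whose first two tokens are the x/y coordinates, followed by 11
# lines of descriptor values that get concatenated into one feature vector.)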
def readFeatures(foldersFeaturesSIFT):
cont = 1
num_rows = 0
imagenes_by_detector = []
imagenes = []
features = []
vfeature = []
points = []
for fold in foldersFeaturesSIFT:
path = PATH_OUTPUTS_FEATURE_EXTRACTION + fold + "/"
cmd = ("ls " + path)
res = commands.getstatusoutput(cmd)
if res[0] == 0:
nameFiles = res[1].split()
for nFile in nameFiles:
with open(path + nFile) as f:
lines = f.read().split("\n")
for l in lines:
if ((cont > 2) and (cont <= len(lines))):
if (num_rows == 11):
features.append(vfeature)
num_rows = 0
vfeature = []
aux = l.split()
if (len(aux) > 0):
points.append(aux[0])
points.append(aux[1])
else:
vfeature = vfeature + l.split()
num_rows += 1
else:
if(cont == 2):
aux = l.split()
points.append(aux[0])
points.append(aux[1])
cont += 1
img = Imagen(features, points)
imagenes.append(img)
features = []
points =[]
imagenes_by_detector.append(imagenes)
imagenes = []
cont = 1
return imagenes_by_detector
# method that computes the Lowe distance between two images,
# comparing the interest points that fall in the same
# quadrant of one image and the other
def getResultComparison(img1, img2):
timg2 = np.transpose(img2.features)
resultV = []
resultTotal = []
i = 0
j = 0
sum = 0
    while i < len(img1.features):
        while j < len(img2.features):
if (img1.quadrants[i] == img2.quadrants[j]):
v1 = img1.features[i]
v2 = img2.features[j]
k = 0
while k < len(v1):
sum += v1[k] * v2[k]
k += 1
sum = math.sqrt(sum)
            resultV.append(sum)
sum = 0
j += 1
i += 1
j = 0
resultTotal.append(sorted(resultV))
resultV = []
return resultTotal
# method that receives the sets of images for each detector
# and obtains the result of comparing them two by two
def comparison(images):
i = 0
result = []
while i < (len(images)-1):
j = i + 1
while j < len(images):
img1 = images[i]
img2 = images[j]
result = getResultComparison(img1, img2)
j += 1
i += 1
# method that assigns to each interest point of the image the quadrant it
# belongs to, depending on the center of each image, which is extracted
# from the database files
def setQuadrant(allImages, datasImages):
cont = 0
cX = cY = r = -1
pX = pY = -1
dImage = []
i = 0
j = 0
vquadrant = []
for imgsDet in allImages:
i = 0
for img in imgsDet:
cX = datasImages[i][0]
cY = datasImages[i][1]
r = datasImages[i][2]
j = 0
print "cX: "+ cX + " " + "cY: " + cY + " " + "r: " + r
while j < (len(img.points)-1):
pX = img.points[j]
pY = img.points[j+1]
print "pX: "+ pX + " " + "pY: " + pY
if ((int(pX) >= int(cX)) and (int(pY) >= int(cY))):
vquadrant.append(1)
else:
if ((int(pX) >= int(cX)) and (int(pY) <= int(cY))):
vquadrant.append(4)
else:
if ((int(pX) <= int(cX)) and (int(pY) <= int(cY))):
vquadrant.append(3)
else:
if ((int(pX) <= int(cX)) and (int(pY) >= int(cY))):
vquadrant.append(2)
j += 2
img.quadrants = vquadrant
vquadrant = []
i += 1
# method that obtains the data for each image in the database: the X and Y
# coordinates of the image center and the radius of the outer boundary
def getDatasImages(files):
datasImage = []
mdatasAllImages = []
for file in files:
ext = file.split(".")
if (ext[1] == "txt"):
with open(PATH_DATABASE_CASIAV4 + file) as f:
line = f.read().split("\n")
line = line[0].split(" ")
datasImage.append(line[0])
datasImage.append(line[1])
datasImage.append(line[5])
mdatasAllImages.append(datasImage)
datasImage = []
return mdatasAllImages
if __name__ == "__main__":
foldersFeaturesSIFT = obteinFolders(PATH_OUTPUTS_FEATURE_EXTRACTION)
allImages = readFeatures(foldersFeaturesSIFT)
files = obteinFolders(PATH_DATABASE_CASIAV4)
datasImages = getDatasImages(files)
setQuadrant(allImages, datasImages)
#print allImages[0][0].features
#print allImages[0][0].points
print allImages[0][0].quadrants
#print allImages[1][0].features
#print allImages[1][0].points
print allImages[1][0].quadrants
#print len(allImages[0][0].quadrants)
#for images in allImages:
# comparisonLowe(images)
``` |
{
"source": "jmandel/aries-rfcs",
"score": 2
} |
#### File: aries-rfcs/code/test_rfcs.py
```python
import os
import pytest
import re
import sys
import tempfile
# We're not using python packages, so we have to solve the path problem the old way.
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')))
import rfcs
@pytest.fixture
def scratch_space():
x = tempfile.TemporaryDirectory()
yield x
x.cleanup()
def test_index(scratch_space):
temp_idx = os.path.join(scratch_space.name, 'index.md')
import generate_index
generate_index.main(temp_idx)
perm_idx = os.path.join(os.path.dirname(__file__), '../index.md')
if os.system('diff %s %s' % (perm_idx, temp_idx)):
pytest.fail("/index.md needs to be updated. Run python code/generate_index.py.")
def test_links():
import check_links
assert check_links.main() == 0
def test_rfc_metadata():
errors = []
def e(rfc, msg):
errors.append(rfc.relpath.replace('README.md','') + ': ' + msg)
for rfc in rfcs.walk():
if not bool(rfc.title): e(rfc, 'no title found')
if rfc.category not in rfc.relpath: e(rfc, 'category does not match path')
if rfc.category[:-1] not in rfc.tags: e(rfc, 'category not in tags')
opposite_category = 'feature' if rfc.category == 'concepts' else 'concept'
if opposite_category in rfc.tags: e(rfc, 'opposite category in tags')
if rfc.status not in rfcs.status_list: e(rfc, 'status is not canonical')
if not re.match(r'\d{4}$', rfc.num): e(rfc, 'num is not 4 digits')
if not re.search(r'\d{4}-\d{2}-\d{2}', rfc.since): e(rfc, 'since does not contain yyyy-mm-dd')
if rfc.start_date:
if not re.search(r'\d{4}-\d{2}-\d{2}', rfc.start_date): e(rfc, 'start_date does not contain yyyy-mm-dd')
if bool(rfc.authors):
if '@' in rfc.authors:
if not re.search(r'\[.*?\]\([^)]+@.*?\)', rfc.authors): e(rfc, 'email is not clickable')
else:
e(rfc, 'no authors found')
if ','.join(rfc.tags) != ','.join(rfc.tags).lower(): e(rfc, 'tags are case-sensitive')
if rfc.supersedes:
if not re.search(r'\[.*?\]\(.*?\)', rfc.supersedes): e(rfc, 'supersedes does not contain hyperlink')
if rfc.superseded_by:
if not re.search(r'\[.*?\]\(.*?\)', rfc.superseded_by): e(rfc, 'superseded_by does not contain hyperlink')
if rfc.impl_count > 0:
if rfc.status == 'PROPOSED': e(rfc, 'should not be PROPOSED if it has an impl')
if errors:
msg = '\n' + '\n'.join(errors)
raise BaseException(msg)
def test_impls():
errors = []
def e(rfc, msg):
errors.append(rfc + ': ' + msg)
pretty_for_normalized_names = {}
normalized_for_base_uri = {}
base_uri_for_normalized = {}
refs = []
def append_to_dict(dict, key, value, ref):
if key not in dict:
dict[key] = []
list = dict.get(key)
if value not in list:
list.append(value)
refs.append((dict, key, value, ref))
def track(name, link, path, row_num):
ref = path + ', impl row ' + str(row_num)
norm_name = rfcs.normalize_impl_name(name)
append_to_dict(pretty_for_normalized_names, norm_name, name, ref)
base_uri = rfcs.get_impl_base(link)
append_to_dict(base_uri_for_normalized, norm_name, base_uri, ref)
append_to_dict(normalized_for_base_uri, base_uri, norm_name, ref)
for abspath in rfcs.walk_files():
try:
with open(abspath, 'rt', encoding='utf-8') as f:
txt = f.read()
path = rfcs.relpath(abspath).replace('/README.md', '')
impl_table = rfcs.get_impl_table(txt)
bad_count = False
n = 1
for row in impl_table:
if len(row) == 2:
cell = row[0].strip()
if cell.startswith('['):
name, link = rfcs.split_hyperlink(cell)
if name and link:
track(name, link, path, n)
else:
if (not bad_count):
e(path, 'row %d in impl table does not have 2 columns' % n)
bad_count = True
n += 1
except:
print('Error while processing ' + abspath)
raise
def find_refs(dict, key, value):
matches = []
for ref in refs:
if ref[0] == dict:
if ref[1] == key:
if ref[2] in value:
matches.append(ref[3])
return matches
for key, value in pretty_for_normalized_names.items():
if len(value) > 1:
offenders = '\n'.join(find_refs(pretty_for_normalized_names, key, value))
e(offenders, '\n inconsistent variants on impl name: %s' % ', '.join(['"%s"' % v for v in value]))
for key, value in normalized_for_base_uri.items():
if len(value) > 1:
offenders = '\n'.join(find_refs(normalized_for_base_uri, key, value))
e(offenders, '\n same site maps to multiple impl names: %s' % ', '.join(['"%s"' % v for v in value]))
for key, value in base_uri_for_normalized.items():
if len(value) > 1:
offenders = '\n'.join(find_refs(base_uri_for_normalized, key, value))
e(offenders, '\n impl name "%s" maps to multiple sites: %s' % (key, ', '.join(['"%s"' % v for v in value])))
if errors:
msg = '\n' + '\n'.join(errors)
raise BaseException(msg)
``` |
{
"source": "jmandel/smart_sample_apps",
"score": 2
} |
#### File: meds_adherence/MedCheck/views.py
```python
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.template import RequestContext
from django.utils import simplejson
from django.shortcuts import render_to_response
# The SMArt API uses these libraries, all from smart_client_python
import datetime
import urllib
import meds_adherence.settings as settings
import smart_client.smart as smart
import smart_client.oauth as oauth
import adherence_check
# Basic configuration: the consumer key and secret we'll use
# to OAuth-sign requests.
SMART_SERVER_OAUTH = {'consumer_key': '',
'consumer_secret': 'smartapp-secret'}
# The SMArt container we're planning to talk to
SMART_SERVER_PARAMS = {
'api_base' : ''
}
# Global variables
ISO_8601_DATETIME = '%Y-%m-%d'
last_pill_dates = {}
#===========================================
# The index page is the generally the first
# page to appear when the application is started.
#===========================================
def index(request):
indexpage = get_template('index.html')
# Get information from the cookie
#cookies = request.COOKIES
try:
#smart_connect_cookie = cookies[cookies.keys()[0]]
smart_oauth_header_quoted = request.GET.get('oauth_header')
smart_oauth_header = urllib.unquote(smart_oauth_header_quoted)
except:
return "Couldn't find a parameter to match the name 'oauth_header'"
# Current context information
oa_params, client = get_smart_client(smart_oauth_header)
# User or physician and the patient name
user = oa_params["smart_user_id"]
patientID = oa_params["smart_record_id"]
# Represent the list as an RDF graph
# Note the general pattern: GET /records/{record_id}/medications/
# Get the medication list for this context
medications = client.records_X_medications_GET()
query = """
PREFIX dcterms:<http://purl.org/dc/terms/>
PREFIX sp:<http://smartplatforms.org/terms#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?med ?name ?quant ?when
WHERE {
?med rdf:type sp:Medication .
?med sp:drugName ?medc.
?medc dcterms:title ?name.
?med sp:fulfillment ?fill.
?fill sp:dispenseDaysSupply ?quant.
?fill dcterms:date ?when.
}
"""
pills = medications.query(query)
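    # Each result row follows the SELECT clause above:
    # (medication URI, drug name, dispense days supply, fill date).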
birthday, patient_name = get_birthday_name(client)
drug = 'all'
# We only want to call the adherence_check once
if settings.PATIENT_ID == patientID:
meds_flags, gaps, refill_data, refill_day, actualMPR = settings.ADHERE_VARS
else:
settings.PATIENT_ID = patientID
meds_flags, gaps, refill_data, refill_day, actualMPR = adherence_check.all_tests(pills, drug, birthday)
settings.ADHERE_VARS = [meds_flags, gaps, refill_data, refill_day, actualMPR]
drug_class_array = {}
for n in range(len(meds_flags)):
drug_class_array[meds_flags[n][5]] = 1
sorted_drug_class_list = sorted(drug_class_array.keys())
variables = Context({
'head_title': u'Medication Adherence Monitor',
'user': user,
'patientID': patientID,
'meds_flags': meds_flags,
'media_root': settings.MEDIA_ROOT,
'patient_name': patient_name,
'drug_class_array': sorted_drug_class_list,
'oauth_header': urllib.quote(smart_oauth_header),
})
output = indexpage.render(variables)
return HttpResponse(output)
#===========================================
#===========================================
def get_smart_client(authorization_header, resource_tokens=None):
""" Initialize a new SmartClient"""
oa_params = oauth.parse_header(authorization_header)
resource_tokens={'oauth_token': oa_params['smart_oauth_token'],
'oauth_token_secret':oa_params['smart_oauth_token_secret']}
SMART_SERVER_PARAMS['api_base'] = oa_params['smart_container_api_base']
SMART_SERVER_OAUTH['consumer_key'] = oa_params['smart_app_id']
ret = smart.SmartClient(SMART_SERVER_OAUTH['consumer_key'],
SMART_SERVER_PARAMS,
SMART_SERVER_OAUTH,
resource_tokens)
ret.record_id=oa_params['smart_record_id']
return oa_params, ret
#===========================================
#===========================================
#def update_pill_dates(med, name, quant, when):
# def runs_out():
# print "Date", when
# s = datetime.datetime.strptime(str(when), ISO_8601_DATETIME)
# s += datetime.timedelta(days=int(float(str(quant))))
# return s
#
# r = runs_out()
# previous_value = last_pill_dates.setdefault(name, r)
# if r > previous_value:
# last_pill_dates[name] = r
def get_birthday_name(client):
#init_smart_client()
demographics = client.records_X_demographics_GET()
query_demo = """
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
PREFIX v:<http://www.w3.org/2006/vcard/ns#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?firstname ?lastname ?gender ?birthday
WHERE {
?r v:n ?n .
?n rdf:type v:Name .
?n v:given-name ?firstname .
?n v:family-name ?lastname .
?r foaf:gender ?gender .
?r v:bday ?birthday .
}
"""
# Get the birthday
demo = demographics.query(query_demo)
for d in demo:
patient_name = d[0] + " " + d[1]
birthday = d[3]
return birthday, patient_name
def risk(request):
""" This function creates data and serves detailed information about
adherence for specific medications."""
# Get the name of the drug if a specific one was requested.
# The default is 'all' drugs.
drug = request.GET.get('drug', 'all')
# Get information from the cookie
#cookies = request.COOKIES
try:
#smart_connect_cookie = cookies[cookies.keys()[0]]
smart_oauth_header_quoted = request.GET.get('oauth_header')
smart_oauth_header = urllib.unquote(smart_oauth_header_quoted)
except:
return "Couldn't find a parameter to match the name 'oauth_header'"
# Current context information
oa_params, client = get_smart_client(smart_oauth_header)
# User or physician and the patient name
# user = oa_params["smart_user_id"]
# patientID = oa_params["smart_record_id"]
# Get the medication list for this context
medications = client.records_X_medications_GET()
query = """
PREFIX dcterms:<http://purl.org/dc/terms/>
PREFIX sp:<http://smartplatforms.org/terms#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT ?med ?name ?quant ?when
WHERE {
?med rdf:type sp:Medication .
?med sp:drugName ?medc.
?medc dcterms:title ?name.
?med sp:fulfillment ?fill.
?fill sp:dispenseDaysSupply ?quant.
?fill dcterms:date ?when.
}
"""
pills = medications.query(query)
#birthday, patient_name = get_birthday_name(client)
# The the fulfillment gap and MPR prediction data
#meds_predict, predictedMPR, actualMPR = logR.gapPredict_logR(pills)
#meds_flags, gaps, refill_data, refill_day, actualMPR = gap_check.gap_check(pills, drug, birthday)
meds_flags, gaps, refill_data, refill_day, actualMPR = settings.ADHERE_VARS
names = {}
if drug == 'all': # get all the drugs for this patient
for pill in pills:
name = pill[1]
names[name] = name
d = pill[3]
else: # only use the specified drug name
meds_flags_new = []
names[drug] = drug
for item in meds_flags:
if drug == item[0]:
meds_flags_new.append(item)
meds_flags = meds_flags_new
ad_data = []
med_names = []
# data = {'prescribed':[], 'actual':[], '60':[], '90':[], '120':[], 'warning':[]}
for n in names.keys():
mpr = actualMPR[n]
d = {}
d["title"] = str(names[n])
med_names.append(names[n])
d["subtitle"] = 'adherence'
d["ranges"] = [ mpr[0], mpr[1], mpr[2] ]
d["measures"] = [1.0]
d["markers"] = [mpr[3]]
ad_data.append(d)
drug_class_array = {}
for n in range(len(meds_flags)):
drug_class_array[meds_flags[n][5]] = 1
sorted_drug_class_array = sorted(drug_class_array.keys())
# Determine width and height of chart by the number of drugs to be shown
width = 400
height = 100
if len(names) == 1:
width = 500
height = 200
variables = RequestContext(request, {
'head_title': u'Predicted 1-year medication possession ratio (MPR)',
'ad_data_js': simplejson.dumps(ad_data),
'med_names': med_names,
'meds_flags': meds_flags,
'refill_day': simplejson.dumps(refill_day),
'refill': simplejson.dumps(refill_data),
'gaps': simplejson.dumps(gaps),
'width': width,
'height': height,
'drug_class_array': sorted_drug_class_array,
'oauth_header': urllib.quote(smart_oauth_header),
})
response = render_to_response("risk.html", context_instance=variables )
return HttpResponse(response)
def about(request):
""" This function creates a page with information about the med adherence application."""
page = get_template('about.html')
try:
smart_oauth_header_quoted = request.GET.get('oauth_header')
smart_oauth_header = urllib.unquote(smart_oauth_header_quoted)
except:
return "Couldn't find a parameter to match the name 'oauth_header'"
variables = Context({
'oauth_header': urllib.quote(smart_oauth_header),
})
output = page.render(variables)
return HttpResponse(output)
def choose_med(request):
""" This function creates a page with instructions for the med adherence application."""
page = get_template('choose_med.html')
try:
smart_oauth_header_quoted = request.GET.get('oauth_header')
smart_oauth_header = urllib.unquote(smart_oauth_header_quoted)
except:
return "Couldn't find a parameter to match the name 'oauth_header'"
variables = Context({
'oauth_header': urllib.quote(smart_oauth_header),
})
output = page.render(variables)
return HttpResponse(output)
``` |
{
"source": "jmanderson-usf/CEN4020F21TeamWyoming",
"score": 2
} |
#### File: CEN4020F21TeamWyoming/test/test_ui.py
```python
import os
import json
import pytest
import sys
import helper
# from StringIO import StringIO
import readchar
from readchar import key
from readchar import readkey
from sys import platform as _platform
from inquirer import events
from io import StringIO
# Setup path assignment.
if _platform.startswith('win'):
config_path = '..\\test\\config.json'
path = '..\\src'
else:
config_path = '../test/config.json'
path = '../src'
helper.create_config(config_path)
sys.path.append(path)
os.chdir(path)
import utils
import in_college
# Create actual instance of config object.
config = utils.InCollegeConfig(config_path)
def test_connect_friends(monkeypatch, capsys):
"""Example UI test with captured output and simulated keypresses."""
test_sequence = [
key.ENTER, # choose the friends option.
*'admin', key.ENTER, # enter friend's first name.
*'admin', key.ENTER, # enter friend's last name.
key.UP, key.ENTER, # go back to home screen.
key.UP, key.ENTER, # quit the application.
]
# "Monkeypatch" the function, so that it accepts a series of key inputs.
monkeypatch.setattr('readchar.readkey', lambda: test_sequence.pop(0))
# Run the script and discard return value.
_ = in_college.user_loop(config)
# Receive output from the execution.
captured = capsys.readouterr()
# Check if the stuff printed out as expected.
assert '🎉 admin is InCollege! Hooray!' in captured.out
# Make sure that the error field is empty.
assert captured.err == ''
def test_save_posting(monkeypatch):
"""Example UI test with simulated keypresses and json update assert."""
test_sequence = [
key.DOWN, # scroll to skip the welcome screen.
key.ENTER, # skip welcome screen.
key.ENTER, # sign in into application.
*'admin', key.ENTER, # enter login string.
*'admin', key.ENTER, # enter password string.
key.ENTER, # search for a job.
key.ENTER, # internships.
key.DOWN, key.ENTER, # show list of jobs.
key.ENTER, # show the first job.
key.DOWN, key.ENTER, # save this job.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # logout.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER # quit.
]
monkeypatch.setattr('readchar.readkey', lambda: test_sequence.pop(0))
_ = in_college.user_loop(config)
# We saved only one job so far, check length of the saved list.
assert len(config['accounts']['admin']['saved_jobs']) == 1
# We store saved jobs as a list of ids, so compare the id of interest.
assert config['accounts']['admin']['saved_jobs'][0] == '2'
def test_apply_for_job(monkeypatch):
"""Test sequence of inputs to apply for a job using an account."""
test_sequence = [
key.DOWN, key.ENTER, # skip welcome screen.
key.ENTER, # sign in into application.
*'admin', key.ENTER, # enter login string.
*'admin', key.ENTER, # enter password string.
key.ENTER, # search for a job.
key.ENTER, # internships.
key.DOWN, key.ENTER, # apply for a job.
key.ENTER, # apply for this specific posting.
key.ENTER, # begin application process.
*'12/12/2022', key.ENTER, # enter graduation date.
*'12/13/2022', key.ENTER, # enter start date.
*'I am great', key.ENTER, # enter essay question.
key.UP, key.ENTER, # exit job list.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER, # logout.
key.UP, key.ENTER, # go back.
key.UP, key.ENTER # quit.
]
# Patch the input.
monkeypatch.setattr('readchar.readkey', lambda: test_sequence.pop(0))
# Run the user loop.
_ = in_college.user_loop(config)
# We applied to only one job so far, check length of the saved list.
assert len(config['accounts']['admin']['applications']) == 1
# We store saved jobs as a dict of keys ids, so compare the id of interest.
assert '2' in config['accounts']['admin']['applications'].keys()
def test_play_video(monkeypatch, capsys):
"""Test "under construction" message for video selection."""
monkeypatch.setattr('readchar.readkey', lambda: test_sequence.pop(0))
test_sequence = [
key.DOWN, key.ENTER, # skip the welcome selection.
*[key.DOWN] * 3, key.ENTER, # scroll to video selection.
key.UP, key.ENTER, # go back to main screen.
key.UP, key.ENTER, # quit the application.
]
# Run the user loop.
_ = in_college.user_loop(config)
# Receive output from the execution.
captured = capsys.readouterr()
# Check if the stuff printed out as expected.
assert captured.out.find('🚨 Playing video 🎥. Under construction. 🚨⚠️')
# Make sure that the error field is empty.
assert captured.err == ''
```
#### File: CEN4020F21TeamWyoming/test/test_utils.py
```python
import os
import json
import pytest
import helper
import sys
from sys import platform as _platform
path = '../src'
if _platform.startswith('win'):
path = '..\\src'
sys.path.append(path)
os.chdir(path)
import utils
def init_testing():
"""Initialize dummy config json for testing purposes."""
test_path = '../test/config.json'
if _platform.startswith('win'):
test_path = '..\\test\\config.json'
helper.create_config(test_path)
return utils.InCollegeConfig(test_path)
def test_utils_login_week1():
config = init_testing()
username, password = '<PASSWORD>', '<PASSWORD>'
assert config.login_valid(username, password) is True
def test_full_name_exists_week1():
config = init_testing()
first, last = 'admin', 'admin'
assert config.full_name_exists(first, last) == True
def test_password_valid_week1():
config = init_testing()
assert config.password_valid('ab') == False
assert config.password_valid('<PASSWORD>') == False
assert config.password_valid('<PASSWORD>!') == False
assert config.password_valid('<PASSWORD>') == False
assert config.password_valid('<PASSWORD>!') == True
def test_create_user_week1():
config = init_testing()
username, firstname, lastname, membership = 'sample', 'sample', 'sample', 'sample'
password1, password2 = '<PASSWORD>', '<PASSWORD>$!'
assert config.create_user(
username,
password2,
firstname,
lastname,
membership
) == True
assert config.create_user(
username,
password1,
firstname,
lastname,
membership
) == False
def test_create_posting_week2():
config = init_testing()
author, title, desc = 'admin', 'sample', 'sample'
employer, location, salary = 'sample', 'sample', 'unpaid'
config.create_posting(author, title, desc, employer, location, salary)
assert len(config['jobs']) == 3
assert config['jobs'][-1]['salary'] == 'unpaid'
def test_save_lang_week3():
config = init_testing()
username1, lang1 = 'admin', 'Spanish'
config.save_lang(username1, lang1)
# Check whether the structure was saved into json.
assert config['accounts'][username1]['language'] == lang1
def test_show_lang_week3(capsys):
config = init_testing()
username = 'admin'
config.show_lang(username)
captured = capsys.readouterr()
# Checking correctly outputted languages.
assert 'English' in captured.out
def test_save_guest_control_week3():
config = init_testing()
username, control_setting_list = 'admin', ['InCollege Email', 'SMS']
config.save_guest_control(username, control_setting_list)
# Checking update in the json file.
assert config.config['guest_control'][username] == control_setting_list
def test_show_guest_control_week3(capsys):
config = init_testing()
username = 'admin'
config.show_guest_control(username)
captured = capsys.readouterr()
# Checking proper output.
assert 'InCollege Email: ON' in captured.out
def test_save_profile_week4():
config = init_testing()
username = 'admin'
profile = {
'title': 'admin',
'major': 'CS',
'university': 'USF',
'about': 'It\'s me',
'experience': [
{'e1': '1111'},
{'e2': '2222'},
{'e3': '3333'}
],
'education': [
{'GED': 'home1'},
{'Bachelor Degree': 'home2'},
{'Master Degree': 'home3'}
]
}
config.save_profile(username, profile)
assert config.config['accounts'][username]['profile'] == profile
def test_edit_profile_week4(capsys):
config = init_testing()
username = 'admin'
config.display_profile(username)
captured = capsys.readouterr()
    assert 'admin admin' in captured.out
    assert 'title: admin' in captured.out
    assert 'major: CS' in captured.out
    assert 'university: USF' in captured.out
    assert 'about: It\'s me' in captured.out
    assert 'experience:' in captured.out
    assert 'e1: 1111' in captured.out
    assert 'e2: 2222' in captured.out
    assert 'e3: 3333' in captured.out
    assert 'education:' in captured.out
    assert 'GED: home1' in captured.out
    assert 'Bachelor Degree: home2' in captured.out
    assert 'Master Degree: home3' in captured.out
def test_save_friends_week5():
config = init_testing()
username = 'admin'
friend_list = ['test']
config.save_friends(username, friend_list)
assert config.config['accounts'][username]['friends'] == friend_list
def test_send_friend_request_week5():
config = init_testing()
target_user = 'admin'
sender = 'test'
config.send_friend_request(target_user, sender)
assert 'test' in config.config['accounts'][target_user]['friend_requests']
def test_accept_friend_request_week5():
config = init_testing()
config.send_friend_request('admin', 'test')
user = 'admin'
accepted_username = 'test'
config.accept_friend_request(user, accepted_username)
assert accepted_username in config.config['accounts'][user]['friends']
assert accepted_username not in config.config['accounts'][user]['friend_requests']
assert user in config.config['accounts'][accepted_username]['friends']
def test_decline_friend_request_week5():
config = init_testing()
config.send_friend_request('admin', 'test')
user = 'admin'
declined_username = 'test'
config.decline_friend_request(user, declined_username)
assert declined_username not in config.config['accounts'][user]['friend_requests']
def test_submit_application_week6():
config = init_testing()
config['current_login'] = 'test'
author, title, desc = 'admin', 'sample', 'sample'
employer, location, salary = 'sample', 'sample', 'unpaid'
config.create_posting(author, title, desc, employer, location, salary)
job_id = config.config['jobs'][-1]['id']
config.submit_application('test', job_id, 'grad date', 'start date', 'txt')
assert job_id in config['accounts']['test']['applications']
def test_withdraw_application_week6():
config = init_testing()
config['current_login'] = 'test'
author, title, desc = 'admin', 'sample', 'sample'
employer, location, salary = 'sample', 'sample', 'unpaid'
config.create_posting(author, title, desc, employer, location, salary)
job_id = config.config['jobs'][-1]['id']
config.submit_application('test', job_id, 'grad date', 'start date', 'txt')
config.withdraw_application('test', job_id)
assert job_id not in config.config['accounts']['test']['applications']
def test_get_list_jobs_week6():
config = init_testing()
user = config['accounts']['admin']
jobs = config.get_list_jobs(user)
assert len(jobs) == 2
def test_send_message_week7():
config = init_testing()
config['current_login'] = 'admin'
config['current_login_membership'] = 'pro'
recipient = config['accounts']['test']
message = 'test message'
config.send_message('test', message)
assert {'admin': '[unread]' + message} in recipient['inbox']
config.send_message('admin', message)
assert {'admin': message} not in config['accounts']['admin']['inbox']
config['current_login_membership'] = ''
recipient['inbox'].pop(0)
config.send_message('test', message)
assert {'admin': '[unread]' + message} not in recipient['inbox']
def test_delete_message_week7():
config = init_testing()
config['current_login'] = 'admin'
config['current_login_membership'] = 'pro'
message = 'test: test message'
config['accounts']['admin']['inbox'].append({'test': 'test message'})
config.delete_message(message)
assert {'test': 'test message'} not in config['accounts']['admin']['inbox']
# for this unit test to pass you need to change send_notification and new_user_notification to return the message
def test_new_user_notification_week8():
config = init_testing()
config['current_login'] = 'admin'
config['current_login_membership'] = 'pro'
config.create_user('alexwl', 'Whocares07!', 'alex', 'logorz', 'pro')
notif = config.new_user_notification()
assert notif == '<NAME> has joined InCollege'
# for this unit test to pass you need to change the send_notifcation function to return the message
def test_send_notification_week8():
config = init_testing()
config['current_login'] = 'admin'
config['current_login_membership'] = 'pro'
notif = 'test notif'
assert notif == config.send_notification(notif)
# for this unit test to pass you need to change send_notification and job_posted_notification to return the message
def test_job_posted_notification_week8():
config = init_testing()
config['current_login'] = 'admin'
config['current_login_membership'] = 'pro'
author, title, desc = 'admin', 'sample', 'sample'
employer, location, salary = 'sample', 'sample', 'unpaid'
config.create_posting(author, title, desc, employer, location, salary)
notif = config.job_posted_notification()
assert notif == 'new job sample has been posted.'
def test_save_course_week9() -> None:
config = init_testing()
config.save_course('admin', 'test course')
assert 'test course' in config['accounts']['admin']['courses']
def process_applied_job_API_week10() -> None:
config = init_testing()
config.process_applied_job_API()
with open('MyCollege_appliedJobs.txt', 'r+', encoding='utf-8') as f:
lines = f.readlines()
assert 'Test Title\nadmin\nw' in lines
``` |
{
"source": "jmanderson-usf/CEN4020TeamAlaska",
"score": 3
} |
#### File: CEN4020TeamAlaska/src/Course.py
```python
from src.database_access import database_access
class Course:
    def __init__(self):
pass
# Returns the status a user has on a course or None
@staticmethod
def getCourseStatus(username: str, title: str, db: database_access) -> bool:
sql = "SELECT completed FROM student_courses WHERE username = ? AND title = ?;"
return db.execute(sql, [username, title])
@staticmethod
def getAllCourseTitles(db: database_access) -> list:
sql = "SELECT title FROM courses;"
return db.execute(sql)
@staticmethod
def setCourseStatus(username: str, title: str, completed: bool, db: database_access) -> None:
courseRegistedSQL = "SELECT * FROM student_courses WHERE username = ? AND title = ?"
result = db.execute(courseRegistedSQL, [username, title])
sql = ""
# Create entry if user has never taken the course
if result == []:
sql = "INSERT INTO student_courses VALUES (?,?,?)"
db.execute(sql, [username, title, completed])
else: # update course status if student has taken the course before
sql = "UPDATE student_courses SET completed = ? WHERE username = ? AND title = ?"
db.execute(sql, [completed, username, title])
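    # Illustrative call (names assumed, not from the original code):
    #     Course.setCourseStatus('admin', 'Python 101', True, db)
    # inserts a completion record the first time and updates it afterwards.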
```
#### File: CEN4020TeamAlaska/src/helpers.py
```python
def validateMenuInput(maxNumber: int) -> int:
while(True):
try:
c = int(input(""))
if(c in range(0, maxNumber + 1)):
return c
else:
print(
"Please try again with an integer between 0 and {}".format(maxNumber))
except:
print("Please try again, Enter a valid integer.")
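# e.g. validateMenuInput(3) keeps prompting until the user enters 0, 1, 2 or 3,
# then returns that integer.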
```
#### File: CEN4020TeamAlaska/src/JobExperience.py
```python
from typing import List
from src.database_access import database_access
class JobExperience:
def __init__(self, username: str, title: str, employer: str, date_start: str, date_end: str, location: str, description: str):
self.username = username
self.title = title
self.employer = employer
self.date_start = date_start
self.date_end = date_end
self.location = location
self.description = description
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.username == other.username and self.title == other.title and self.employer == other.employer and self.date_start == other.date_start and self.date_end == other.date_end and self.location == other.location and self.description == other.description
return False
def DbAddJobExperience(self, db: database_access) -> None:
sql = "INSERT INTO job_experience VALUES (?,?,?,?,?,?,?)"
params = [
self.username,
self.title,
self.employer,
self.date_start,
self.date_end,
self.location,
self.description
]
db.execute(sql, params)
def getJobInformation(username: str, db: database_access) -> List[JobExperience]:
jobQueryString = '''
SELECT *
FROM job_experience
WHERE username = ?
'''
jobInformation = db.execute(jobQueryString, [username])
experiences = list()
for job in jobInformation:
experiences.append(JobExperience(
job[0], job[1], job[2], job[3], job[4], job[5], job[6]))
return experiences
```
#### File: CEN4020TeamAlaska/src/message.py
```python
from src.database_access import database_access
class Message:
@staticmethod
def send_message(sender: str, receiver: str, body: str, db: database_access):
# the status is either sent or read
sql_post_messages_string = '''
INSERT INTO messages (sender, receiver, body) VALUES (?, ?, ?)
'''
res = db.execute(sql_post_messages_string, [sender, receiver, body])
@staticmethod
def get_my_messages(receiver: str, db: database_access):
sql_get_messages = '''
SELECT * FROM messages WHERE receiver = ? ORDER BY time_sent
'''
res = db.execute(sql_get_messages, [receiver])
return res
@staticmethod
def delete_message(message_id: int, db: database_access):
sql_delete_message = '''
DELETE FROM messages WHERE message_id = ?
'''
db.execute(sql_delete_message, [message_id])
check = 'SELECT COUNT(*) FROM messages WHERE message_id = ?'
# checking if the delete was successful
res = db.execute(check, [message_id])
return True if res[0][0] == 0 else False
```
#### File: src/tests/test_JobPosting.py
```python
import src.database_access
import src.Page
import src.PostedJob
import src.Job
def resetFunctions():
src.Page.input = input
src.Page.print = print
class TestJobPosting():
page = src.Page.Page()
page.user.username = "General Kenobi The Negotiator"
db_name = "testing.sqlite3"
db = src.database_access.database_access(db_name)
src.Page.db = db
def clearTables(self):
self.db.delete_user_applied()
self.db.delete_user_interested()
def testPostValidJob(self):
input_values = ['Worm Farmer', 'Farming worms',
'WormsRUs', 'Bikini Bottom', '20000']
output = []
def mock_input(s):
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.postjob()
resetFunctions()
assert output == [
"Thanks your job was posted! Returning to the previous menu..."
]
def testPostInvalidJob(self):
input_values = ['Worm Farmer0', 'Farming worms',
'WormsRUs', 'Bikini Bottom', 'Shmeckle', '20000']
output = []
def mock_input(s):
output.append(s)
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.postjob()
resetFunctions()
assert output == [
"Please enter the job's title: ",
"Please enter a description of the job: ",
"Who is the employer of the job? ",
"Where is this job located? ",
"Please estimate the salary of the job (only numbers): ",
"Not a valid number. Try again.",
"Please estimate the salary of the job (only numbers): ",
"Thanks your job was posted! Returning to the previous menu..."
]
def testJobPostLimit(self):
for i in range(1, 10):
input_values = [
'Worm Farmer' + str(i), 'Farming worms', 'WormsRUs', 'Bikini Bottom', '20000']
def mock_input(s):
return input_values.pop(0)
src.Page.input = mock_input
self.page.postjob()
output = []
input_values = ['Not going to post', 'Farming worms',
'WormsRUs', 'Bikini Bottom', '20000']
def mock_input(s):
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.postjob()
resetFunctions()
assert output == [
'There are already 10 jobs. Please try again later\n'
]
def testDatabaseJobPrint(self):
output = []
src.database_access.print = lambda s: output.append(s)
self.db.print_jobs()
print(output)
assert output == [(1, 'General Kenobi The Negotiator', 'Worm Farmer', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (2, 'General Kenobi The Negotiator', 'Worm Farmer0', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (3, 'General Kenobi The Negotiator', 'Worm Farmer1', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (4, 'General Kenobi The Negotiator', 'Worm Farmer2', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (5, 'General Kenobi The Negotiator', 'Worm Farmer3', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0),
(6, 'General Kenobi The Negotiator', 'Worm Farmer4', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (7, 'General Kenobi The Negotiator', 'Worm Farmer5', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (8, 'General Kenobi The Negotiator', 'Worm Farmer6', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (9, 'General Kenobi The Negotiator', 'Worm Farmer7', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), (10, 'General Kenobi The Negotiator', 'Worm Farmer8', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)]
resetFunctions()
def test_get_job_by_id(self):
expected = src.Job.Job(1, 'General Kenobi The Negotiator', 'Worm Farmer',
'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)
actual = src.Job.Job.get_job_by_id(1, self.db)
assert expected == actual
# Test job does not exist
expected = False
actual = actual = src.Job.Job.get_job_by_id(-1, self.db)
assert expected == actual
def test_get_my_postings(self):
# Test user with jobs
expected = [src.Job.Job(1, 'General Kenobi The Negotiator', 'Worm Farmer', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(2, 'General Kenobi The Negotiator', 'Worm Farmer0', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(3, 'General Kenobi The Negotiator', 'Worm Farmer1', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(4, 'General Kenobi The Negotiator', 'Worm Farmer2', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(5, 'General Kenobi The Negotiator', 'Worm Farmer3', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0),
src.Job.Job(6, 'General Kenobi The Negotiator', 'Worm Farmer4', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(7, 'General Kenobi The Negotiator', 'Worm Farmer5', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(8, 'General Kenobi The Negotiator', 'Worm Farmer6', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(9, 'General Kenobi The Negotiator', 'Worm Farmer7', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(10, 'General Kenobi The Negotiator', 'Worm Farmer8', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)]
actual = src.Job.Job.get_my_postings(
"General Kenobi The Negotiator", self.db)
assert expected == actual
# Test user does not exist
actual = src.Job.Job.get_my_postings("NonExistentUser", self.db)
assert actual == []
def test_print_full_job(self):
job = src.Job.Job(1, 'General Kenobi The Negotiator', 'Worm Farmer',
'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)
output = []
src.Job.print = lambda s: output.append(s)
job.print_full_job()
expected = ['\n*Worm Farmer Job Posting*\n' +
'Job Description: Farming worms\n' +
'Location: Bikini Bottom\n' +
'Expected Salary: 20000.0\n' +
'Posted By: WormsRUs']
assert output == expected
def test_delete_job(self):
expected = [src.Job.Job(2, 'General Kenobi The Negotiator', 'Worm Farmer0', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(3, 'General Kenobi The Negotiator', 'Worm Farmer1', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(4, 'General Kenobi The Negotiator', 'Worm Farmer2', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(5, 'General Kenobi The Negotiator', 'Worm Farmer3', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0),
src.Job.Job(6, 'General Kenobi The Negotiator', 'Worm Farmer4', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(7, 'General Kenobi The Negotiator', 'Worm Farmer5', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(8, 'General Kenobi The Negotiator', 'Worm Farmer6', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(9, 'General Kenobi The Negotiator', 'Worm Farmer7', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0), src.Job.Job(10, 'General Kenobi The Negotiator', 'Worm Farmer8', 'Farming worms', 'WormsRUs', 'Bikini Bottom', 20000.0)]
assert src.Job.Job.delete_job(1, self.db) == True
actual = src.Job.Job.get_my_postings(
'General Kenobi The Negotiator', self.db)
assert actual == expected
def test_apply_job(self):
# Test no applied jobs
assert src.Job.Job.get_applied_jobs("darvelo", self.db) == False
# Test applied job has new item
src.Job.Job.apply_job("darvelo", 2, "some reason", self.db)
assert src.Job.Job.get_applied_jobs("darvelo", self.db) == [
src.Job.Job.get_job_by_id(2, self.db)]
def test_add_interested(self):
# Test no interested jobs
assert src.Job.Job.get_interested_jobs("darvelo", self.db) == False
# Test interested job has new item
src.Job.Job.add_interested("darvelo", 3, self.db)
assert src.Job.Job.get_interested_jobs(
"darvelo", self.db) == [src.Job.Job.get_job_by_id(3, self.db)]
def testCleanUp(self): # Teardown
self.db.delete_jobs_table()
self.db.delete_user_applied()
self.db.delete_user_interested()
# self.db.close()
assert True == True
```
#### File: src/tests/test_page.py
```python
import datetime
import src.Page
import src.database_access
from src.User import *
from Profile.Profile import *
from src.Page import *
import src.helpers
import src.Job
from src.Course import Course
# Does initial setup before any test runs
def setup_module():
global db
db = Database("testing.sqlite3")
src.Page.db = db
db.delete_profile_table()
db.delete_users_table()
db.delete_user_friends()
db.delete_job_experience_table()
db.delete_user_interested()
db.delete_user_applied()
db.delete_notifications()
db.delete_courses()
def resetFunctions():
src.Page.input = input
src.Page.print = print
src.helpers.input = input
class TestIsPasswordSecure:
page = src.Page.Page()
def test_password_character_limit_lower(self):
assert self.page.is_password_secure("<PASSWORD>") == False
assert self.page.is_password_secure("") == False
assert self.page.is_password_secure("<PASSWORD>") == False # 7 chars
assert self.page.is_password_secure("<PASSWORD>") == True # 8 chars
assert self.page.is_password_secure("<PASSWORD>") == True
def test_password_character_limit_upper(self):
assert self.page.is_password_secure("<PASSWORD>") == False
assert self.page.is_password_secure(
"<PASSWORD>") == False # 13 chars
assert self.page.is_password_secure("<PASSWORD>") == True # 12 chars
assert self.page.is_password_secure("<PASSWORD>") == True
def test_password_contains_capital(self):
assert self.page.is_password_secure("password1#") == False
assert self.page.is_password_secure("Password1#") == True
assert self.page.is_password_secure("<PASSWORD>") == False
assert self.page.is_password_secure("<PASSWORD>") == True
def test_password_contains_lowercase(self):
assert self.page.is_password_secure("PASSWORD1#") == False
assert self.page.is_password_secure("<PASSWORD>#") == True
assert self.page.is_password_secure("<PASSWORD>") == False
assert self.page.is_password_secure("<PASSWORD>") == True
def test_password_contains_number(self):
assert self.page.is_password_secure("Password$$") == False
assert self.page.is_password_secure("Password1$") == True
def test_password_contains_special(self):
assert self.page.is_password_secure("<PASSWORD>") == False
assert self.page.is_password_secure("<PASSWORD>#") == True
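# The assertions above imply a policy of 8-12 characters with at least one
# uppercase letter, lowercase letter, digit and special character. A minimal
# illustrative sketch of such a check (an assumption for clarity -- not the
# actual implementation inside src.Page) could look like this:
import re

def _password_policy_sketch(password):
    """Return True if the password satisfies the rules the tests above exercise."""
    if not 8 <= len(password) <= 12:
        return False
    required = (r"[A-Z]", r"[a-z]", r"[0-9]", r"[^A-Za-z0-9]")
    return all(re.search(pattern, password) for pattern in required)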
class TestGetCredentials:
page = src.Page.Page()
def testLoginIO(self):
input_values = ['randion', 'Password#1']
output = []
def mock_input(s):
output.append(s)
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.get_credentials(False)
resetFunctions()
assert output == [
'Enter username: ',
'Enter password: ',
]
def testRegisterIO(self):
input_values = ['randion', 'Password#1', '<PASSWORD>by', '<PASSWORD>', '1']
output = []
def mock_input(s):
output.append(s)
return input_values.pop(0)
src.helpers.input = mock_input
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.get_credentials(True)
resetFunctions()
assert output == [
'Enter username: ',
'Enter password: ',
'Enter first name: ',
'Enter last name: ',
'1- Standard Tier\n2- Plus Tier\nEnter a choice: ',
''
]
class TestRegisterLogin:
page = src.Page.Page()
db_name = "testing.sqlite3"
db = src.database_access.database_access(db_name)
src.Page.db = db
def testUserRegistration(self):
input_values = ['randion', '<PASSWORD>', '<PASSWORD>by', '<PASSWORD>', '1']
output = []
def mock_input(s):
return input_values.pop(0)
src.Page.input = mock_input
src.helpers.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.register()
resetFunctions()
print(output)
assert output == ['1- Standard Tier\n2- Plus Tier\nEnter a choice: ',
'An account for randion was registered successfully']
def testUserLoginCorrect(self):
input_values = ['randion', '<PASSWORD>']
output = []
def mock_input(s):
output.append(s)
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.login()
resetFunctions()
assert output == [
'Enter username: ',
'Enter password: ',
"You have successfully logged in\n",
]
def testUserLoginIncorrect(self):
input_values = ['randion', 'Password#']
output = []
def mock_input(s):
output.append(s)
return input_values.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.login()
resetFunctions()
assert output == [
'Enter username: ',
'Enter password: ',
"Incorrect username / password, please try again\n"
]
def testUserRegistrationLimit(self):
def mock_input(s):
return input_values.pop(0)
src.Page.input = mock_input
src.helpers.input = mock_input
for i in range(0, 11):
input_values = [
'randion' + str(i), 'Password#1' + str(i), 'Robby' + str(i), 'Ybbor' + str(i), '1']
self.page.register()
resetFunctions()
output = []
input = ['TomSawyer', '<PASSWORD>', 'Tommy', "Sawyer", '2']
def mock_input(s):
output.append(s)
src.Page.input = mock_input
src.helpers.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.register()
resetFunctions()
assert output == [
"All permitted accounts have been created, please come backlater\n"
]
# def testDatabaseUserPrint(self):
# output = []
# src.database_access.print = lambda s: output.append(s)
# self.db.print_users()
# src.database_access.print = print
# expected = [("randion", "Password#1", "Robby",
# "Ybbor", "standard", "english", 1, 1, 1)]
# for i in range(0, 9):
# expected.append((
# 'randion' + str(i), 'Password#1' + str(i), 'Robby' + str(i), 'Ybbor' + str(i), "standard", "english", 1, 1, 1))
# assert output == expected
def testCleanUp(self): # Teardown
self.db.delete_users_table()
assert True == True
class TestProfileControls:
def SetUp(self):
credentials = ("testuser", "<PASSWORD>!",
"Nathan", "Aldino", "standard")
create_user(credentials, db)
def testProfilePrint(self):
profile = Profile("testuser", "sir", "general",
"university", "i code", "no education")
getProfile("testuser", db)
profile.set_title(profile.title, db)
profile.set_major(profile.major, db)
profile.set_university_name(profile.university_name, db)
profile.set_about_me(profile.about_me, db)
profile.set_education(profile.education, db)
comparison = getProfile("testuser", db)
print("\n\n\n\n")
print(comparison.title)
assert profile == comparison
def testJobExperiencePrint(self):
testjobexp = JobExperience(
"testuser", "publix1", "publix", "today", "tomrrow", "here", "cashier")
testjobexp.DbAddJobExperience(db)
alljobs = getJobInformation("testuser", db)
assert alljobs[0] == testjobexp
def testEditIfIncomplete(self):
unfinishedprofile = Profile(None, None, None, None, None, None)
unfinishedprofile.set_title("mr", db)
assert (
unfinishedprofile.title == "mr" and
not unfinishedprofile.isComplete()
)
def testEditComplete(self):
unfinishedprofile = Profile("user", None, None, None, None, None)
unfinishedprofile.set_title("title", db)
unfinishedprofile.set_major("compsci", db)
unfinishedprofile.set_university_name("usf", db)
unfinishedprofile.set_about_me("apple", db)
unfinishedprofile.set_education("elementary school", db)
assert unfinishedprofile.isComplete()
def CleanUp(self):
db.delete_profile_table()
db.delete_users_table()
db.delete_job_experience_table()
class TestNetworkPage:
page = src.Page.Page()
def testSetUp(self):
users = [
("darvelo", "Password1!",
"Daniel", "Arvelo", "Standard"),
("marvelo", "Password1!", "Maniel", "Arvelo", "standard"),
("rarvelo", "Password1!", "Raniel", "Arvelo", "standard")
]
for i in range(len(users)):
user = create_user(users[i], db)
if user.username == users[0][0]:
self.page.user = user
else:
# make friends with first user
sql = '''
INSERT INTO user_friends VALUES (?,?,?)
'''
db.execute(sql, [self.page.user.username,
users[i][0], 'Approved'])
# Create profiles for all but last user
if i != len(users) - 1:
profile = getProfile(users[i][0], db)
profile.set_about_me("test about me {}".format(i), db)
profile.set_education("test education {}".format(i), db)
profile.set_major("test major {}".format(i), db)
profile.set_title("test title {}".format(i), db)
profile.set_university_name("test university {}".format(i), db)
def testMissingProfile(self):
input_values = ['0']
output = []
self.page.user = get_user_by_username("darvelo", db)
def mock_input(s):
return input_values.pop(0)
src.helpers.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.myNetwork_page()
resetFunctions()
assert output == [
"Welcome to the your friends, where you hopefully have some.\n",
"These are your friends:\n1 - marvelo\n2 - rarvelo\n",
"Select one of the users below to view profile.",
"1 - marvelo\n2 - Previous Page\nEnter a number: ",
]
def testNoFriends(self):
input_values = ['0']
output = []
credentials = ("garvelo", "Password1!",
"Ganiel", "Arvelo", "plus")
self.page.user = create_user(credentials, db)
def mock_input(s):
return input_values.pop(0)
src.helpers.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.myNetwork_page()
resetFunctions()
assert output == [
"Welcome to the your friends, where you hopefully have some.\n",
"Sorry you have no friends, your mother did warn you.",
"Select one of the options below: ",
"1 - Previous Page\nEnter a number: "
]
class TestFriends:
page = src.Page.Page()
def testSetup(self):
self.page.user = create_user(
("john", "<PASSWORD>!", "John", "Smith", "standard"), db)
create_user(("mary", "<PASSWORD>!", "Mary", "Smith", "standard"), db)
create_user(("eric", "<PASSWORD>!", "eric", "smith", "standard"), db)
mary = getProfile("mary", db)
mary.set_about_me("test about me", db)
mary.set_education("test education", db)
mary.set_major("test major", db)
mary.set_title("test title", db)
mary.set_university_name("university123", db)
eric = getProfile("eric", db)
eric.set_about_me("test about me", db)
eric.set_education("test education", db)
eric.set_major("major123", db)
eric.set_title("test title", db)
eric.set_university_name("test university", db)
# eric sends FR to john and gets accepted by john
def testPendingRequest(self):
db.execute('''INSERT INTO user_friends VALUES (?,?,?)''',
['eric', self.page.user.username, 'Pending'])
input = ["1"]
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.pendingFriendRequests(db)
resetFunctions()
assert output == [
"Friend request from eric. Enter 1 to accept or 2 to decline.\n",
"Successfully saved your changes!\n"
]
# searching for mary via university
def testSearchUni(self):
input = ["university123", "1"]
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.searchByUniversity()
resetFunctions()
assert output == [
'Results:\n', '1: Username: mary Firstname: Mary Lastname: Smith', 'Friend Request Sent']
# searching for mary via major
def testSearchMajor(self):
input = ["major123", "1"]
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.searchByMajor()
resetFunctions()
assert output == [
'Results:\n', '1: Username: eric Firstname: eric Lastname: smith', 'Friend Request Sent']
def delete_users(self):
db.execute('''INSERT INTO user_friends VALUES (?,?,?)''',
['dana', self.page.user.username, 'Pending'])
self.page.delete_friend('dana', db)
sql_for_all_friends = '''
SELECT * FROM user_friends WHERE (username1 = ? AND username2 = ?) OR (username1 = ? AND username2 = ?)
'''
res = db.execute(sql_for_all_friends, [
self.page.user.username, 'dana', 'dana', self.page.user.username])
assert res is None
class TestJobPages:
page = src.Page.Page()
page.user = User("darvelo", "", "", "", "standard", "", "",
"", "", datetime.datetime.now(), True, db)
def test_job_page_view_job_no_jobs(self):
input_Page = ['1']
input_helpers = ['2']
output = []
def mock_input_Page(s):
return input_Page.pop(0)
src.Page.input = mock_input_Page
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
assert output == [
"You have currently applied for 0 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
"sorry, no jobs for you"
]
def test_job_page_view_job(self):
# -- Setup - create jobs, users --
input_Page = ['Worm Farmer', 'Farming worms',
'WormsRUs', 'Bikini Bottom', '20000']
output = []
def mock_input_Page(s):
return input_Page.pop(0)
src.Page.input = mock_input_Page
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
self.page.postjob()
input_Page = ['Worm Farmer 2', 'Farming worms 2',
'WormsRUs 2', 'Bikini Bottom 2', '20000']
self.page.postjob()
# -- end setup --
input_Page = ['1', '-1', '-1']
input_helpers = ['2']
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 0 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
"Available Jobs:",
"1 - Worm Farmer",
"2 - Worm Farmer 2"
]
# -- TEST Job does not exist --
input_Page = ['-1', '1']
input_helpers = ['2']
output = []
src.Page.input = mock_input_Page
src.helpers.input = mock_input_helpers
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 0 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
"Available Jobs:",
"1 - Worm Farmer",
"2 - Worm Farmer 2",
"Job does not exist"
]
def test_job_page_view_my_postings(self):
# -- Setup --
def mock_input_Page(s):
return input_Page.pop(0)
src.Page.input = mock_input_Page
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
# -- end setup --
input_Page = ['0']
input_helpers = ['3', '2']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
assert output == [
"You have currently applied for 0 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
'\nMy Postings:',
'Job ID: 1, Title: Worm Farmer',
'Job ID: 2, Title: Worm Farmer 2',
'\n1 - Delete Job\n2 - Previous page\nEnter a choice: ',
]
def test_view_applications(self):
# -- Setup --
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
src.Job.Job.apply_job("darvelo", 1, "some reason", db)
# -- end setup --
input_helpers = ['4']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 1 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
'\n',
'\n',
]
def test_view_interested_none(self):
# -- Setup --
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
# -- end setup --
input_helpers = ['5']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 1 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
"You are not interested in any jobs currently.\n"
]
def test_view_interested(self):
# -- Setup --
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
src.Job.Job.add_interested("darvelo", 2, db)
# -- end setup --
input_helpers = ['5']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 1 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
]
def test_job_page_delete_posting(self):
# -- Setup --
def mock_input_Page(s):
return input_Page.pop(0)
src.Page.input = mock_input_Page
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
# -- end setup --
input_Page = ['0']
input_helpers = ['3', '1', '1']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 1 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
'\nMy Postings:',
'Job ID: 1, Title: Worm Farmer',
'Job ID: 2, Title: Worm Farmer 2',
'\n1 - Delete Job\n2 - Previous page\nEnter a choice: ',
'Enter the Job ID to Delete: ',
'Job successfully deleted'
]
def test_job_page_view_my_postings_Zero(self):
# -- Setup --
self.page.user = User("NonExistentUser", "", "",
"", "", "", "", "", "", None, True, db)
def mock_input_Page(s):
return input_Page.pop(0)
src.Page.input = mock_input_Page
def mock_input_helpers(s):
return input_helpers.pop(0)
src.helpers.input = mock_input_helpers
# -- end setup --
input_Page = ['0']
input_helpers = ['3']
output = []
src.Page.print = lambda s: output.append(s)
self.page.post_job_page()
resetFunctions()
assert output == [
"You have currently applied for 0 jobs",
'1 - Post a New Job\n2 - View Jobs\n3 - My Postings\n4 - View applications\n5 - View interested\n6 - Previous page\nEnter a choice: ',
"You don't have any postings at the moment",
]
class TestTrainingPage:
page = src.Page.Page()
def trainingandeducation(self):
input = ['1']
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.training_page()
resetFunctions()
assert output == [
"1 - Training and Education\n2 - IT Help Desk\n3 - Business Analysis and Strategy\n4 - Security\n5 - Go back",
"1 - Learn Python\n2 - Learn React\n3 - Public Speaking 101\n4 - SCRUM basics",
"Under Construction."
]
def ITandsecurity(self):
input = ['2']
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.training_page()
resetFunctions()
assert output == [
"1 - Training and Education\n2 - IT Help Desk\n3 - Business Analysis and Strategy\n4 - Security\n5 - Go back",
"Coming soon!"
]
def business(self):
input = ['3', '4']
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.training_page()
resetFunctions()
assert output == [
"1 - Training and Education\n2 - IT Help Desk\n3 - Business Analysis and Strategy\n4 - Security\n5 - Go back",
"1 - How to use InCollege learning\n2 - Train the trainer\n3 - Gamification of learning\n4 - Not seeing what you're looking for? Sign in to see all 7,609 results.",
'Enter username: ',
]
def incollegetraining(self):
input = []
output = []
def mock_input(s):
return input.pop(0)
src.Page.input = mock_input
src.Page.print = lambda s: output.append(s)
self.page.in_college_learning_page()
resetFunctions()
assert output == [
"Pick any of the courses below to enroll:\n",
"1 - How to use InCollege learning\n2 - Train the trainer\n3 - Gamification of learning\n4 - Understanding the Architectural Design Process\n5 - Project Management Simplified\n6 - Go Back"
]
def test_add_courses(self):
Course.setCourseStatus(self.page.user.username,
"Software Dev", False, db)
queryString = "SELECT * FROM student_courses WHERE username = ? AND title = ?"
res = db.execute(
queryString, [self.page.user.username, "Software Dev"])
assert len(res) > 0
def test_update_status(self):
Course.setCourseStatus(self.page.user.username,
"Software Engineer", False, db)
Course.setCourseStatus(self.page.user.username,
"Software Engineer", True, db)
queryString = "SELECT * FROM student_courses WHERE username = ? AND title = ?"
res = db.execute(
queryString, [self.page.user.username, "Software Engineer"])
assert res[0][2] == True
# Runs after every test in this file has finished running
def teardown_module():
db = Database('testing.sqlite3')
db.delete_profile_table()
db.delete_users_table()
db.delete_user_friends()
db.delete_job_experience_table()
db.delete_jobs_table()
db.delete_user_applied()
db.delete_user_interested()
db.delete_notifications()
db.close()
``` |
{
"source": "jmandivarapu1/GraphMining",
"score": 3
} |
#### File: jmandivarapu1/GraphMining/yelp_karate_club.py
```python
import networkx as nx
import networkx.algorithms.community as nxcom
from matplotlib import pyplot as plt
# %matplotlib inline  # IPython magic; only valid inside a notebook, not in a plain .py script
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'figure.figsize': (15, 10)})
# get reproducible results
import random
from numpy import random as nprand
random.seed(123)
nprand.seed(123)
G_karate = nx.karate_club_graph()
# Find the communities
communities = sorted(nxcom.greedy_modularity_communities(G_karate), key=len, reverse=True)
# Count the communities
print(f"The karate club has {len(communities)} communities.")
def set_node_community(G, communities):
'''Add community to node attributes'''
for c, v_c in enumerate(communities):
for v in v_c:
# Add 1 to save 0 for external edges
G.nodes[v]['community'] = c + 1
def set_edge_community(G):
'''Find internal edges and add their community to their attributes'''
for v, w, in G.edges:
if G.nodes[v]['community'] == G.nodes[w]['community']:
# Internal edge, mark with community
G.edges[v, w]['community'] = G.nodes[v]['community']
else:
# External edge, mark as 0
G.edges[v, w]['community'] = 0
def get_color(i, r_off=1, g_off=1, b_off=1):
'''Assign a color to a vertex.'''
r0, g0, b0 = 0, 0, 0
n = 16
low, high = 0.1, 0.9
span = high - low
r = low + span * (((i + r_off) * 3) % n) / (n - 1)
g = low + span * (((i + g_off) * 5) % n) / (n - 1)
b = low + span * (((i + b_off) * 7) % n) / (n - 1)
return (r, g, b)
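# get_color() maps a community index deterministically onto an RGB triple inside
# [0.1, 0.9], so the same community keeps the same colour across runs.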
# Set node and edge communities
set_node_community(G_karate, communities)
set_edge_community(G_karate)
node_color = [get_color(G_karate.nodes[v]['community']) for v in G_karate.nodes]
# Split edges into internal (both endpoints in the same community) and external (between communities)
external = [(v, w) for v, w in G_karate.edges if G_karate.edges[v, w]['community'] == 0]
internal = [(v, w) for v, w in G_karate.edges if G_karate.edges[v, w]['community'] > 0]
internal_color = ['black' for e in internal]
karate_pos = nx.spring_layout(G_karate)
plt.rcParams.update({'figure.figsize': (15, 10)})
# Draw external edges
nx.draw_networkx(
G_karate,
pos=karate_pos,
node_size=0,
edgelist=external,
edge_color="silver")
# Draw nodes and internal edges
nx.draw_networkx(
G_karate,
pos=karate_pos,
node_color=node_color,
edgelist=internal,
edge_color=internal_color)
data_path = './facebook_combined.txt'
G_social = nx.read_edgelist(data_path)
pos = nx.spring_layout(G_social, k=0.1)
plt.rcParams.update({'figure.figsize': (15, 10)})
nx.draw_networkx(
G_social,
pos=pos,
node_size=0,
edge_color="#444444",
alpha=0.05,
with_labels=False)
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'figure.figsize': (15, 10)})
plt.style.use('dark_background')
# Set node and edge communities
set_node_community(G_social, communities)
set_edge_community(G_social)
# Set community color for internal edges
external = [(v, w) for v, w in G_social.edges if G_social.edges[v, w]['community'] == 0]
internal = [(v, w) for v, w in G_social.edges if G_social.edges[v, w]['community'] > 0]
internal_color = ["black" for e in internal]
node_color = [get_color(G_social.nodes[v]['community']) for v in G_social.nodes]
# external edges
nx.draw_networkx(
G_social,
pos=pos,
node_size=0,
edgelist=external,
edge_color="silver",
node_color=node_color,
alpha=0.2,
with_labels=False)
# internal edges
nx.draw_networkx(
G_social, pos=pos,
edgelist=internal,
edge_color=internal_color,
node_color=node_color,
alpha=0.05,
with_labels=False)
result = nxcom.girvan_newman(G_karate)
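# girvan_newman() returns a generator of successive splits (each a tuple of node
# sets); next(result) takes only the first, coarsest partition.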
communities = next(result)
print(f"Girvan-Newman first split yields {len(communities)} communities.")
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'figure.figsize': (15, 10)})
# Set node and edge communities
set_node_community(G_karate, communities)
set_edge_community(G_karate)
# Set community color for nodes
node_color = [get_color(G_karate.nodes[v]['community']) for v in G_karate.nodes]
# Set community color for internal edges
external = [(v, w) for v, w in G_karate.edges if G_karate.edges[v, w]['community'] == 0]
internal = [(v, w) for v, w in G_karate.edges if G_karate.edges[v, w]['community'] > 0]
internal_color = [get_color(G_karate.edges[e]['community']) for e in internal]
karate_pos = nx.spring_layout(G_karate)
# Draw external edges
nx.draw_networkx(
G_karate, pos=karate_pos, node_size=0,
edgelist=external, edge_color="#333333", with_labels=False)
# Draw nodes and internal edges
nx.draw_networkx(
G_karate, pos=karate_pos, node_color=node_color,
edgelist=internal, edge_color=internal_color)
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'figure.figsize': (15, 10)})
cliques = list(nx.find_cliques(G_karate))
max_clique = max(cliques, key=len)
node_color = [(0.5, 0.5, 0.5) for v in G_karate.nodes()]
for i, v in enumerate(G_karate.nodes()):
if v in max_clique:
node_color[i] = (0.5, 0.5, 0.9)
nx.draw_networkx(G_karate, node_color=node_color, pos=karate_pos)
# k-core with degree at least 30
G_core_30 = nx.k_core(G_social, 30)
# similarly, the k-core with degree at least 60
G_core_60 = nx.k_core(G_social, 60)
# Visualize network and k-cores
plt.rcParams.update(plt.rcParamsDefault)
plt.rcParams.update({'figure.figsize': (15, 10)})
plt.style.use('dark_background')
pos = nx.spring_layout(G_social, k=0.1)
nx.draw_networkx(
G_social, pos=pos, node_size=0, edge_color="#333333", alpha=0.05, with_labels=False)
nx.draw_networkx(
G_core_30, pos=pos, node_size=0, edge_color="green", alpha=0.05, with_labels=False)
nx.draw_networkx(
G_core_60, pos=pos, node_size=0, edge_color="red", alpha=0.05, with_labels=False)
``` |
{
"source": "jmaner/local-volume-db",
"score": 2
} |
#### File: local-volume-db/lvdb/table.py
```python
__author__ = "<NAME>"
import os
from os.path import dirname, abspath
import logging
import tempfile
from collections import OrderedDict as odict
import copy
import getpass
import numpy as np
import pandas as pd
import yaml
import dateutil.parser
import datetime
import fitsio
import healpy
import numpy.lib.recfunctions as recfn
from lvdb.database import Database
class Table(object):
"""Base class for postgres table objects."""
_filename = os.path.join(get_datadir(),'tables.yaml')
_section = None
def __init__(self,config=None,section=None):
if config is None: config = self._filename
if section is None: section = self._section
self.db = Database()
self.load_config(config,section)
def load_config(self,config, section=None):
if config is None: return config
if isinstance(config,basestring):
logging.debug("Loading configuration file: %s..."%config)
config = yaml.load(open(config,'r'))
elif isinstance(config,dict):
config = copy.deepcopy(config)
else:
msg = "Unrecognized type for table configuration: %s"
raise TypeError(msg % type(config))
if section is None:
self.config = config
else:
self.config = config[section]
self.tablename = self.config['table']
# Check the config
self.check_config()
return config
def check_config(self):
assert 'columns' in self.config
# Check that the columns match
if self.exists():
cfgcol = sorted(map(str.upper,self.config['columns'].keys()))
dbcol = sorted(map(str.upper,self.get_columns()))
if not np.all(cfgcol==dbcol):
msg = "Config columns do not match database."
raise ValueError(msg)
def exists(self):
return self.db.table_exists(self.tablename)
def get_columns(self):
query = "select * from %s limit 0;"%self.tablename
return self.db.get_columns(query)
def create_table(self):
return self.db.create_table(**self.config)
def drop_table(self):
self.db.drop_table(self.tablename)
def grant_table(self):
query = "grant select on %s to public;"%self.tablename
self.db.execute(query)
def create_indexes(self):
self.db.create_indexes(**self.config)
def build_table(self,force=True):
if force: self.drop_table()
self.create_table()
self.create_indexes()
self.grant_table()
def load_table(self,data,option=None):
self.db.load_data(self.tablename,data,option)
def get_description(self):
return self.db.get_description("select * from %s limit 0;"%self.tablename)
def get_dtypes(self):
return self.db.get_dtypes("select * from %s limit 0;"%self.tablename)
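# Illustrative usage sketch (hypothetical subclass and config section, assuming a
# reachable database -- not part of the library itself):
#
#     class CatalogTable(Table):
#         _section = 'catalog'
#
#     tab = CatalogTable()
#     if not tab.exists():
#         tab.build_table()
#     tab.load_table(rows)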
``` |
{
"source": "jmaneyrol69/yogit",
"score": 2
} |
#### File: jmaneyrol69/yogit/setup.py
```python
import setuptools
import yogit
DEPENDENCIES = ["click", "halo", "packaging", "PyYAML>=5.1", "pyperclip", "requests", "requests-toolbelt", "tabulate"]
def get_public_description():
with open("README.md", encoding="utf-8") as md_file:
package_description_content = md_file.read()
with open("CHANGELOG.md", encoding="utf-8") as md_file:
changelog_content = md_file.read()
return package_description_content + "\n\n" + changelog_content
setuptools.setup(
name=yogit.__application__,
version=yogit.__version__,
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
description="Command line utility for GitHub daily work.",
long_description=get_public_description(),
long_description_content_type="text/markdown",
keywords="git github utility branch pull requests",
url="https://github.com/hasboeuf/yogit",
packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests"]),
classifiers=["Programming Language :: Python :: 3", "Operating System :: OS Independent"],
zip_safe=True,
install_requires=DEPENDENCIES,
entry_points={"console_scripts": ["yogit=yogit.yogit.cli:main"]},
)
```
#### File: yogit/api/queries.py
```python
import textwrap
import click
from tabulate import tabulate
import yogit.api.statements as S
from yogit.api.client import GraphQLClient, RESTClient
from yogit.api.statement import prepare, prepare_pagination
from yogit.utils.dateutils import dt_for_str, days_ago_str
from yogit.utils.spinner import spin
from yogit.yogit.logger import LOGGER
def shorten_str(full_string):
"""
Shorten string to 50 chars max, including an ending ellipsis
Words are not truncated
"""
return textwrap.shorten(full_string, width=50, placeholder="...")
class Query:
""" Represent a GitHub query """
def __init__(self):
self._response = []
self.client = None
def _handle_response(self, response):
self._response.append(response)
def execute(self):
""" Execute the query """
raise NotImplementedError()
def tabulate(self):
""" Return tabulated result """
raise NotImplementedError()
def print(self):
""" Print result """
click.echo(self._response)
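# The concrete queries below follow one pattern: subclass GraphQLQuery or
# RESTQuery, override _handle_response() to accumulate rows in self.data, and
# expose print()/tabulate() for the CLI.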
class GraphQLQuery(Query):
def __init__(self, statement, variables=[], extra_data={}, pagination_offset=None):
super().__init__()
self.client = GraphQLClient()
self.statement = statement
self.variables = variables
self.extra_data = extra_data
self.pagination_offset = pagination_offset
def get_pagination_info(self):
raise NotImplementedError()
def get_count(self):
raise NotImplementedError()
@spin
def execute(self, spinner):
prepared_statement = prepare(self.statement, self.variables, self.extra_data)
if self.pagination_offset is None:
response = self.client.get(prepared_statement)
super()._handle_response(response)
self._handle_response(response)
return
cursor = None
has_next = True
while has_next:
paginated_statement = prepare_pagination(prepared_statement, self.pagination_offset, cursor)
response = self.client.get(paginated_statement)
super()._handle_response(response)
self._handle_response(response)
count = self.get_count()
if count > 0:
spinner.text = "Loading... (yet {} entries found)".format(count)
pagination_info = self.get_pagination_info(response)
has_next = pagination_info["hasNextPage"]
cursor = pagination_info["endCursor"]
class RESTQuery(Query):
def __init__(self, endpoint):
super().__init__()
self.client = RESTClient()
self.endpoint = endpoint
@spin
def execute(self, spinner):
response = self.client.get(self.endpoint)
super()._handle_response(response)
self._handle_response(response)
class LoginQuery(GraphQLQuery):
def __init__(self):
super().__init__(S.LOGIN_STATEMENT)
self.login = None
def _handle_response(self, response):
self.login = response["data"]["viewer"]["login"]
def get_login(self):
return self.login
class ReviewRequestedQuery(GraphQLQuery):
def __init__(self, is_closed):
self.is_closed = is_closed
state = "closed" if self.is_closed else "open"
super().__init__(
S.REVIEW_REQUESTED_STATEMENT,
variables=[S.LOGIN_VARIABLE],
extra_data={"state": state},
pagination_offset=100,
)
self.data = []
def get_pagination_info(self, response):
return response["data"]["search"]["pageInfo"]
def get_count(self):
return len(self.data)
def _handle_response(self, response):
for pr in response["data"]["search"]["edges"]:
title = shorten_str(pr["node"]["title"])
url = pr["node"]["url"]
updated = dt_for_str(pr["node"]["updatedAt"]).date()
updated_str = days_ago_str(updated)
self.data.append([updated, updated_str, url, title])
self.data = sorted(self.data, key=lambda x: (x[2], x[3]))
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def print(self):
if len(self.data) == 0:
click.secho("All done! 🎉✨", bold=True)
else:
click.echo(tabulate([x[1:] for x in self.data], headers=["UPDATED", "PULL REQUEST", "TITLE"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class ReviewListQuery(GraphQLQuery):
def __init__(self):
super().__init__(S.REVIEW_LIST_STATEMENT, pagination_offset=100)
self.data = []
def get_pagination_info(self, response):
return response["data"]["viewer"]["contributionsCollection"]["pullRequestReviewContributions"]["pageInfo"]
def get_count(self):
return len(self.data)
def _handle_response(self, response):
for review in response["data"]["viewer"]["contributionsCollection"]["pullRequestReviewContributions"]["edges"]:
pr_state = review["node"]["pullRequest"]["state"]
if pr_state != "OPEN":
continue
url = review["node"]["pullRequest"]["url"]
rv_state = review["node"]["pullRequestReview"]["state"]
last_commit_pushed = dt_for_str(
review["node"]["pullRequest"]["commits"]["edges"][0]["node"]["commit"]["pushedDate"]
)
rv_updated = review["node"]["pullRequestReview"]["updatedAt"]
if rv_updated is None:
rv_updated = review["node"]["pullRequestReview"]["createdAt"]
rv_updated = dt_for_str(rv_updated)
up_to_date = rv_updated > last_commit_pushed
rv_updated_str = days_ago_str(rv_updated.date())
rv_state_str = rv_state
if not up_to_date:
rv_state_str += " (new commits)"
self.data.append([rv_updated.date(), rv_updated_str, url, rv_state_str])
# Sort by url, then by reversed date:
self.data = sorted(self.data, key=lambda x: x[2])
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def print(self):
if len(self.data) == 0:
click.secho("Nothing... 😿", bold=True)
else:
click.echo(tabulate([x[1:] for x in self.data], headers=["UPDATED", "PULL REQUEST", "STATE"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class OrganizationListQuery(GraphQLQuery):
def __init__(self):
super().__init__(S.ORGANIZATION_LIST_STATEMENT)
self.data = []
def _handle_response(self, response):
orgas = response["data"]["viewer"]["organizations"]["edges"]
for orga in orgas:
login = orga["node"]["login"]
url = orga["node"]["url"]
self.data.append([login, url])
self.data = sorted(self.data, key=lambda x: x[0].lower())
def print(self):
if len(self.data) == 0:
click.secho("You do not belong to any organization 😿", bold=True)
else:
click.echo(tabulate(self.data, headers=["ORGANIZATION", "URL"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class OrganizationMemberListQuery(GraphQLQuery):
def __init__(self, organization):
super().__init__(
S.ORGANIZATION_MEMBER_LIST_STATEMENT, pagination_offset=100, extra_data={"organization": organization}
)
self.data = []
self.organization = organization
def get_pagination_info(self, response):
return response["data"]["viewer"]["organization"]["membersWithRole"]["pageInfo"]
def get_count(self):
return len(self.data)
def _handle_response(self, response):
for member in response["data"]["viewer"]["organization"]["membersWithRole"]["edges"]:
login = member["node"]["login"]
email = member["node"]["email"]
location = member["node"]["location"]
role = member["role"]
self.data.append([login, email, location, role])
self.data = sorted(self.data, key=lambda x: x[0])
def print(self):
click.secho("{}'s members".format(self.organization), bold=True)
click.echo(tabulate(self.data, headers=["NAME", "EMAIL", "LOCATION", "ROLE"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class RateLimitQuery(GraphQLQuery):
def __init__(self):
super().__init__(S.RATE_LIMIT_STATEMENT)
self.limit = None
self.remaining = None
self.reset_at = None
def _handle_response(self, response):
rate_limit = response["data"]["rateLimit"]
self.limit = rate_limit["limit"]
self.remaining = rate_limit["remaining"]
self.reset_at = rate_limit["resetAt"]
def print(self):
click.secho("GitHub usage: {}/{} until {}".format(self.remaining, self.limit, self.reset_at), bold=True)
class PullRequestListQuery(GraphQLQuery):
def __init__(self, labels):
super().__init__(S.PULL_REQUEST_LIST_STATEMENT)
self.data = []
self.labels = labels
def _handle_response(self, response):
for pr in response["data"]["viewer"]["pullRequests"]["edges"]:
created = dt_for_str(pr["node"]["createdAt"]).date()
url = pr["node"]["url"]
title = shorten_str(pr["node"]["title"])
mergeable = pr["node"]["mergeable"]
created_str = days_ago_str(created)
if self.labels:
pr_labels = [x["node"]["name"].lower() for x in pr["node"]["labels"]["edges"]]
if not set(self.labels).issubset(set(pr_labels)):
continue
self.data.append([created, created_str, url, title, mergeable])
# Sort by url, then by reversed date:
self.data = sorted(self.data, key=lambda x: x[2])
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def print(self):
if len(self.data) == 0:
click.secho("Nothing... 😿 Time to push hard 💪", bold=True)
else:
click.echo(tabulate([x[1:] for x in self.data], headers=["CREATED", "URL", "TITLE", "MERGEABLE"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class OrgaPullRequestListQuery(GraphQLQuery):
def __init__(self, labels, organization):
super().__init__(
S.ORGA_PULL_REQUEST_LIST_STATEMENT, pagination_offset=10, extra_data={"organization": organization}
)
self.data = []
self.labels = labels
def get_count(self):
return len(self.data)
def get_pagination_info(self, response):
return response["data"]["search"]["pageInfo"]
def _handle_response(self, response):
for pr in response["data"]["search"]["edges"]:
created = dt_for_str(pr["node"]["createdAt"]).date()
url = pr["node"]["url"]
title = shorten_str(pr["node"]["title"])
created_str = days_ago_str(created)
if self.labels:
pr_labels = [x["node"]["name"].lower() for x in pr["node"]["labels"]["edges"]]
if not set(self.labels).issubset(set(pr_labels)):
continue
self.data.append([created, created_str, url, title])
# Sort by url, then by reversed date:
self.data = sorted(self.data, key=lambda x: x[2])
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def print(self):
if len(self.data) == 0:
click.secho("Nothing... 😿 Time to push hard 💪", bold=True)
else:
click.echo(tabulate([x[1:] for x in self.data], headers=["CREATED", "URL", "TITLE"]))
click.secho("Count: {}".format(len(self.data)), bold=True)
class ContributionListQuery:
def __init__(self, dt_from, dt_to, organization=None):
# TODO reengineer this class - create a generic one capable of executing
# queries sequentially
self.pr_query = PullRequestContributionListQuery(dt_from, dt_to, organization)
self.rv_query = PullRequestReviewContributionListQuery(dt_from, dt_to, organization)
def execute(self):
self.pr_query.execute()
self.rv_query.execute()
def print(self):
data = []
data.extend(self.pr_query.data)
data.extend(self.rv_query.data)
# Sort by url, then by reversed date:
data = sorted(data, key=lambda x: (x[1], x[2], x[3]))
data = sorted(data, key=lambda x: x[0], reverse=True)
if len(data) == 0:
click.secho("Nothing... 😿 Time to push hard 💪", bold=True)
else:
click.echo(tabulate(data, headers=["CREATED", "PULL REQUEST", "ROLE", "TITLE"]))
click.secho("Count: {}".format(len(data)), bold=True)
class ContributionStatsQuery(GraphQLQuery):
def __init__(self):
super().__init__(S.CONTRIBUTION_STATS_STATEMENT)
self.data = []
def _handle_response(self, response):
for k, v in response["data"]["viewer"]["contributionsCollection"].items():
# `camelCase` to `Title case`
key = "".join([" " + x.lower() if x.isupper() else x for x in k])
key = key[0].upper() + key[1:]
self.data.append([key, v])
self.data = sorted(self.data, key=lambda x: x[0])
def tabulate(self):
return tabulate(self.data, headers=["STAT", "VALUE"])
def print(self):
click.echo(self.tabulate())
class PullRequestContributionListQuery(GraphQLQuery):
def __init__(self, dt_from, dt_to, organization=None):
super().__init__(
S.PULL_REQUEST_CONTRIBUTION_LIST_STATEMENT,
pagination_offset=100,
extra_data={"organization": organization, "from": dt_from.isoformat(), "to": dt_to.isoformat()},
)
self.data = []
def get_count(self):
return len(self.data)
def get_pagination_info(self, response):
return response["data"]["viewer"]["contributionsCollection"]["pullRequestContributions"]["pageInfo"]
def _handle_response(self, response):
pr_contributions = response["data"]["viewer"]["contributionsCollection"]["pullRequestContributions"]["edges"]
for pr_contribution in pr_contributions:
created = dt_for_str(pr_contribution["node"]["pullRequest"]["createdAt"]).date()
url = pr_contribution["node"]["pullRequest"]["url"]
title = shorten_str(pr_contribution["node"]["pullRequest"]["title"])
self.data.append([created, url, "OWNER", title])
# Sort by url, then by reversed date:
self.data = sorted(self.data, key=lambda x: (x[1], x[2], x[3]))
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def tabulate(self):
return tabulate(self.data, headers=["CREATED", "PULL REQUEST", "ROLE", "TITLE"])
def print(self):
if len(self.data) == 0:
click.secho("Nothing... 😿 Time to push hard 💪", bold=True)
else:
click.echo(self.tabulate())
click.secho("Count: {}".format(len(self.data)), bold=True)
class PullRequestReviewContributionListQuery(GraphQLQuery):
def __init__(self, dt_from, dt_to, organization=None):
super().__init__(
S.PULL_REQUEST_REVIEW_CONTRIBUTION_LIST_STATEMENT,
pagination_offset=100,
extra_data={"organization": organization, "from": dt_from.isoformat(), "to": dt_to.isoformat()},
)
self.data = []
def get_count(self):
return len(self.data)
def get_pagination_info(self, response):
return response["data"]["viewer"]["contributionsCollection"]["pullRequestReviewContributions"]["pageInfo"]
def _handle_response(self, response):
rv_contributions = response["data"]["viewer"]["contributionsCollection"]["pullRequestReviewContributions"][
"edges"
]
for rv_contribution in rv_contributions:
created = dt_for_str(rv_contribution["node"]["pullRequestReview"]["publishedAt"]).date()
url = rv_contribution["node"]["pullRequest"]["url"]
title = shorten_str(rv_contribution["node"]["pullRequest"]["title"])
self.data.append([created, url, "REVIEWER", title])
# Sort by url, then by reversed date:
self.data = sorted(self.data, key=lambda x: (x[1], x[2], x[3]))
self.data = sorted(self.data, key=lambda x: x[0], reverse=True)
def tabulate(self):
return tabulate(self.data, headers=["CREATED", "PULL REQUEST", "ROLE", "TITLE"])
def print(self):
if len(self.data) == 0:
click.secho("Nothing... 😿 Time to push hard 💪", bold=True)
else:
click.echo(self.tabulate())
click.secho("Count: {}".format(len(self.data)), bold=True)
class OneDayContributionListQuery(GraphQLQuery):
def __init__(self, report_dt):
super().__init__(S.ONE_DAY_CONTRIBUTION_LIST_STATEMENT, [], extra_data={"date": report_dt.isoformat()})
self.data = []
def _handle_response(self, response):
pr_contributions = response["data"]["viewer"]["contributionsCollection"]["pullRequestContributions"]["edges"]
rv_contributions = response["data"]["viewer"]["contributionsCollection"]["pullRequestReviewContributions"][
"edges"
]
for pr_contribution in pr_contributions:
url = pr_contribution["node"]["pullRequest"]["url"]
title = shorten_str(pr_contribution["node"]["pullRequest"]["title"])
state = pr_contribution["node"]["pullRequest"]["state"]
self.data.append([url, "OWNER", state, title])
for rv_contribution in rv_contributions:
url = rv_contribution["node"]["pullRequest"]["url"]
title = shorten_str(rv_contribution["node"]["pullRequest"]["title"])
state = rv_contribution["node"]["pullRequestReview"]["state"]
self.data.append([url, "REVIEWER", state, title])
self.data = sorted(self.data, key=lambda x: (x[0], x[1], x[2], x[3]))
def tabulate(self):
return tabulate([x[:-1] for x in self.data], headers=["PULL REQUEST", "ROLE", "STATE"])
def print(self):
click.echo(self.tabulate())
def get_contrib_str(self):
return ["{} ({})".format(x[3], x[1].lower()) for x in self.data]
class BranchListQuery(GraphQLQuery):
def __init__(self, emails=None, is_dangling=False):
super().__init__(S.BRANCH_LIST_STATEMENT, pagination_offset=10)
self.data = []
self.emails = emails
self.is_dangling = is_dangling
def get_pagination_info(self, response):
return response["data"]["viewer"]["repositoriesContributedTo"]["pageInfo"]
def get_count(self):
return len(self.data)
def _handle_response(self, response):
for repo in response["data"]["viewer"]["repositoriesContributedTo"]["edges"]:
repo_url = repo["node"]["url"]
for branch in repo["node"]["refs"]["edges"]:
branch_name = branch["node"]["name"]
author_email = branch["node"]["target"]["author"]["email"]
pr_list = []
for pr in branch["node"]["associatedPullRequests"]["edges"]:
pr_list.append(pr["node"]["url"])
if self.is_dangling and pr_list:
continue
pr_list = sorted(pr_list)
if self.emails is not None:
if author_email in self.emails:
if self.is_dangling:
self.data.append([repo_url, branch_name])
else:
self.data.append([repo_url, branch_name, "\n".join(pr_list)])
self.data = sorted(self.data, key=lambda x: (x[0], x[1]))
def print(self):
no_results_message = "Nothing... 😿 Time to push hard 💪"
headers = ["REPO", "BRANCH", "PULL REQUEST"]
if self.is_dangling:
no_results_message = "Everything is clean 👏"
headers = ["REPO", "BRANCH"]
if len(self.data) == 0:
click.secho(no_results_message, bold=True)
else:
click.echo(tabulate(self.data, headers=headers))
click.secho("Count: {}".format(len(self.data)), bold=True)
class EmailQuery(RESTQuery):
def __init__(self):
super().__init__("/user/emails")
self.emails = None
def _handle_response(self, response):
self.emails = [x["email"] for x in response]
def get_emails(self):
return self.emails
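# Illustrative usage sketch (assumes yogit is already configured with a valid
# GitHub token; shown for clarity, not executed here):
#
#     query = RateLimitQuery()
#     query.execute()
#     query.print()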
```
#### File: yogit/api/requester.py
```python
import requests
import click
from requests_toolbelt.utils import dump
from yogit.yogit.logger import LOGGER
def http_call(method, url, **kwargs):
"""
Perform HTTP call
- Check status code
- Check JSON validity
- Return reponse content as a dict
"""
try:
response = requests.request(method, url, **kwargs)
if response:
LOGGER.info("Response: %s", response.status_code)
else:
LOGGER.info("Response: %s", dump.dump_all(response).decode("utf-8"))
except requests.RequestException as exception:
LOGGER.error(str(exception))
raise click.ClickException(str(exception))
LOGGER.debug(response.content[:500])
if response.status_code == 200:
try:
return response.json()
except Exception as exception:
LOGGER.error(str(exception))
raise click.ClickException(response.text)
elif response.status_code == 400:
raise click.ClickException("Bad request")
elif response.status_code == 401:
raise click.ClickException("Unauthorized")
elif response.status_code == 500:
raise click.ClickException("Internal server error")
else:
raise click.ClickException(response.text)
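# Illustrative usage sketch (the GitHub rate-limit endpoint is used purely as an
# example; the token value is a placeholder):
#
#     data = http_call("get", "https://api.github.com/rate_limit",
#                      headers={"Authorization": "token <token>"})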
```
#### File: yogit/storage/storage.py
```python
import os
import yaml
import click
from yogit.yogit.logger import LOGGER
class Storage:
""" Storage based on YAML file """
def __init__(self, filename, version=None):
self.filename = filename
self.version = version
def get_path(self):
"""
Get storage file path
"""
return self.filename
def load(self):
""" Load YAML """
try:
with open(self.filename, "r") as yaml_file:
return yaml.load(yaml_file, Loader=yaml.FullLoader) or {}
except OSError as error:
LOGGER.error(str(error))
return {}
except Exception as error:
raise click.ClickException("Cannot parse `{}`: {}".format(self.get_path(), str(error)))
LOGGER.error(str(error))
def save(self, data):
with open(self.filename, "w") as yaml_file:
if data is not None and self.version is not None:
data["version"] = self.version
yaml.safe_dump(data, stream=yaml_file, indent=4)
def get_version(self):
data = self.load()
return data.get("version", None) or None
```
#### File: tests/mocks/mock_settings.py
```python
import os
import tempfile
from contextlib import contextmanager
from unittest.mock import patch
import pytest
from yogit.yogit.settings import Settings
@contextmanager
def named_temporary_file(*args, **kwds):
"""
Context manager to handle temp file on all OS.
Windows does not allow processes other than the one used to create the NamedTemporaryFile
to access the file when using delete=True (the default).
=> Closing the file manually.
"""
tmpfile = tempfile.NamedTemporaryFile(delete=False)
try:
yield tmpfile
finally:
tmpfile.close()
os.unlink(tmpfile.name)
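# The fixtures below rely on this helper: the file is created once, handed to the
# `with` body, and removed in the `finally` block so the pattern also works on Windows.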
@pytest.fixture(scope="function")
def temporary_scrum_report():
"""
Make scrum report use temporary config file
"""
with named_temporary_file() as scrum_report_file:
print("Using scrum report `%s`" % scrum_report_file.name)
with patch("yogit.yogit.settings.get_scrum_report_path", return_value=scrum_report_file.name):
yield
@pytest.fixture(scope="function")
def temporary_settings():
"""
Make settings use temporary config file
"""
with named_temporary_file() as settings_file:
print("Using temporary settings `%s`" % settings_file.name)
with patch("yogit.yogit.settings.get_settings_path", return_value=settings_file.name):
yield
@pytest.fixture(scope="function")
def mock_settings():
"""
Make settings use temporary config file and fill them
"""
with named_temporary_file() as settings_file:
print("Using mock settings `%s`" % settings_file.name)
with patch("yogit.yogit.settings.get_settings_path", return_value=settings_file.name):
fill_settings()
yield
def fill_settings():
settings = Settings()
settings.set_github_emails(["<EMAIL>", "<EMAIL>", "<EMAIL>"])
settings.set_github_login("user1")
settings.set_github_token("github_token")
settings.set_slack_token("slack_token")
settings.set_slack_channel("slack_channel")
def assert_empty_settings():
settings = Settings()
assert settings.get_github_token() == ""
assert settings.get_github_login() == ""
assert settings.get_github_emails() == []
assert settings.get_slack_token() == ""
assert settings.get_slack_channel() == ""
```
#### File: yogit/tests/test_branch.py
```python
import responses
import pytest
from click.testing import CliRunner
from yogit.yogit import cli
from yogit.yogit.errors import ExitCode
from yogit.api.client import GITHUB_API_URL_V4
from yogit.tests.mocks.mock_settings import mock_settings
def _add_graphql_response(json):
responses.add(responses.POST, GITHUB_API_URL_V4, json=json, status=200)
@pytest.fixture
def runner():
return CliRunner()
@pytest.mark.usefixtures("mock_settings")
@responses.activate
def test_empty_br_list_no_repo(runner):
_add_graphql_response(
{
"data": {
"viewer": {
"repositoriesContributedTo": {"pageInfo": {"hasNextPage": False, "endCursor": None}, "edges": []}
}
}
}
)
# Without --dangling
result = runner.invoke(cli.main, ["branch", "list"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == ("Nothing... 😿 Time to push hard 💪\n")
# With --dangling
result = runner.invoke(cli.main, ["branch", "list", "--dangling"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == ("Everything is clean 👏\n")
@pytest.mark.usefixtures("mock_settings")
@responses.activate
def test_empty_br_list_no_branch(runner):
_add_graphql_response(
{
"data": {
"viewer": {
"repositoriesContributedTo": {
"pageInfo": {"hasNextPage": False, "endCursor": None},
"edges": [
{"node": {"url": "https://", "refs": {"edges": []}}},
{"node": {"url": "https://", "refs": {"edges": []}}},
],
}
}
}
}
)
# Without --dangling
result = runner.invoke(cli.main, ["branch", "list"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == ("Nothing... 😿 Time to push hard 💪\n")
# With --dangling
result = runner.invoke(cli.main, ["branch", "list", "--dangling"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == ("Everything is clean 👏\n")
@pytest.mark.usefixtures("mock_settings")
@responses.activate
def test_br_list(runner):
response_part_1 = {
"data": {
"viewer": {
"repositoriesContributedTo": {
"pageInfo": {"hasNextPage": True, "endCursor": "cursor_id"},
"edges": [
{
"node": {
"url": "https://xyz",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "xyz",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://xyz",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "abc",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://abc",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "no_pull_request",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://def",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {
"edges": [
{"node": {"url": "https://xyz"}},
{"node": {"url": "https://abc"}},
]
},
"name": "has_pull_request",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
},
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "notmine",
"target": {"author": {"email": "<EMAIL>", "name": "notme"}},
}
},
]
},
}
},
],
}
}
}
}
response_part_2 = {
"data": {
"viewer": {
"repositoriesContributedTo": {
"pageInfo": {"hasNextPage": False, "endCursor": None},
"edges": [
{
"node": {
"url": "https://fgh",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "xyz",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
}
],
}
}
}
}
_add_graphql_response(response_part_1)
_add_graphql_response(response_part_2)
result = runner.invoke(cli.main, ["branch", "list"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == (
"REPO BRANCH PULL REQUEST\n"
"----------- ---------------- --------------\n"
"https://abc no_pull_request\n"
"https://def has_pull_request https://abc\n"
" https://xyz\n"
"https://fgh xyz\n"
"https://xyz abc\n"
"https://xyz xyz\n"
"Count: 5\n"
)
@pytest.mark.usefixtures("mock_settings")
@responses.activate
def test_br_list_dangling(runner):
response_part_1 = {
"data": {
"viewer": {
"repositoriesContributedTo": {
"pageInfo": {"hasNextPage": False, "endCursor": None},
"edges": [
{
"node": {
"url": "https://xyz",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "xyz",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://xyz",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "abc",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://abc",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "no_pull_request",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
}
]
},
}
},
{
"node": {
"url": "https://def",
"refs": {
"edges": [
{
"node": {
"associatedPullRequests": {
"edges": [
{"node": {"url": "https://xyz"}},
{"node": {"url": "https://abc"}},
]
},
"name": "has_pull_request",
"target": {"author": {"email": "<EMAIL>", "name": "user1"}},
}
},
{
"node": {
"associatedPullRequests": {"edges": []},
"name": "notmine",
"target": {"author": {"email": "<EMAIL>", "name": "notme"}},
}
},
]
},
}
},
],
}
}
}
}
_add_graphql_response(response_part_1)
result = runner.invoke(cli.main, ["branch", "list", "--dangling"])
assert result.exit_code == ExitCode.NO_ERROR.value
assert result.output == (
"REPO BRANCH\n"
"----------- ---------------\n"
"https://abc no_pull_request\n"
"https://xyz abc\n"
"https://xyz xyz\n"
"Count: 3\n"
)
```
#### File: yogit/tests/test_rest_client.py
```python
import click
import responses
import pytest
from yogit.api.client import RESTClient, GITHUB_API_URL_V3
def _add_response(status, json):
responses.add(responses.GET, GITHUB_API_URL_V3 + "/endpoint", json=json, status=status)
@responses.activate
def test_ok_200():
_add_response(200, {"data": "result"})
client = RESTClient()
assert client.get("/endpoint") == {"data": "result"}
@responses.activate
def test_ko_400():
_add_response(400, {"error": "result"})
client = RESTClient()
with pytest.raises(click.ClickException) as e:
client.get("/endpoint")
assert str(e.value) == "Bad request"
@responses.activate
def test_ko_401():
_add_response(401, {"error": "result"})
client = RESTClient()
with pytest.raises(click.ClickException) as e:
client.get("/endpoint")
assert str(e.value) == "Unauthorized"
@responses.activate
def test_ko():
_add_response(500, {"error": "result"})
client = RESTClient()
with pytest.raises(click.ClickException) as e:
client.get("/endpoint")
assert str(e.value) == "Internal server error"
```
#### File: yogit/tests/test_statement.py
```python
from unittest.mock import patch
import pytest
from yogit.api.statement import prepare
import yogit.api.statements as S
from yogit.tests.mocks.mock_settings import mock_settings
@pytest.mark.usefixtures("mock_settings")
def test_prepare_no_token():
statement = """
{
login: @login@,
date: @today@
}
"""
assert prepare(statement, []) == statement
@pytest.mark.usefixtures("mock_settings")
def test_prepare_only_one_token():
statement = """
{
login: $login,
date: $today
}
"""
assert (
prepare(statement, [S.LOGIN_VARIABLE])
== """
{
login: user1,
date: $today
}
"""
)
@pytest.mark.usefixtures("mock_settings")
@patch("yogit.api.statement.today_earliest_str", return_value="2019-07-01")
def test_prepare_all_tokens(mock_today_earliest_str):
statement = """
{
login: $login,
date: $today
}
"""
assert (
prepare(statement, [S.LOGIN_VARIABLE, S.TODAY_VARIABLE])
== """
{
login: user1,
date: 2019-07-01
}
"""
)
```
#### File: yogit/utils/spinner.py
```python
import platform
from halo import Halo
def get_spinner_object():
spinner_type = "dots"
if platform.system() == "Windows":
# Avoid unicode char issue
spinner_type = "line"
return Halo(text="Loading", spinner=spinner_type, color=None)
def spin(func):
"""
Wrap Halo within a decorator then it's easy to disable it.
"""
def inner(self, *args, **kwargs):
with get_spinner_object() as spinner:
func(self, spinner, *args, **kwargs)
return inner
```
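For context, a minimal usage sketch (the class and method names below are illustrative, not part of yogit): a method decorated with `spin` receives the Halo spinner as its second positional argument and can update its text while the work runs.
```python
from yogit.utils.spinner import spin


class ReportBuilder:
    @spin
    def build(self, spinner, title):
        # `spinner` is injected by the decorator; Halo exposes a settable `text`.
        spinner.text = "Building {}".format(title)
        # ... long-running work would go here ...


ReportBuilder().build("daily report")
```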
#### File: yogit/yogit/checks.py
```python
import click
from yogit.yogit.settings import Settings
from yogit.yogit.update_checker import UpdateChecker
def account_required(func):
"""
Check if account setup has been performed
"""
def wrapper(self, *args, **kwargs):
# pylint: disable=missing-docstring
if not Settings().is_github_valid():
raise click.ClickException("Account required, please `yogit account setup` first.")
func(self, *args, **kwargs)
return wrapper
def _check_update():
UpdateChecker().check()
def check_update(func):
"""
Check if a new version of yogit is available
"""
def wrapper(self, *args, **kwargs):
# pylint: disable=missing-docstring
_check_update()
func(self, *args, **kwargs)
return wrapper
```
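As a sketch of how these decorators are meant to be stacked (the `whoami` command below is hypothetical, not part of yogit): `@click.pass_context` sits above them so each wrapper receives the Click context as its first positional argument, mirroring the real commands.
```python
import click
from yogit.yogit.checks import account_required, check_update


@click.command("whoami")
@click.pass_context
@account_required
@check_update
def whoami(ctx):
    # Both checks run before this body; a missing account raises ClickException.
    click.echo("account is configured")
```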
#### File: yogit/yogit/logger.py
```python
import logging
import os
import sys
from yogit import get_name, get_version
from yogit.yogit.paths import get_log_path, SETTINGS_DIR
def get_logger(stdout=False, logger_name=get_name(), version=get_version()):
"""
Create and configure a logger using a given name.
"""
os.makedirs(SETTINGS_DIR, exist_ok=True)
application_str = logger_name
if version:
application_str += " " + version
formatter = logging.Formatter(
fmt=(
"%(asctime)s "
"[{application}:%(process)d] "
"[%(levelname)s] "
"%(message)s".format(application=application_str)
),
datefmt="%Y-%m-%dT%H:%M:%S%z",
)
file_log_handler = logging.FileHandler(get_log_path())
file_log_handler.setLevel(logging.DEBUG)
file_log_handler.setFormatter(formatter)
local_logger = logging.getLogger(logger_name)
local_logger.setLevel(logging.DEBUG)
local_logger.addHandler(file_log_handler)
if stdout:
console_log_handler = logging.StreamHandler(sys.stdout)
console_log_handler.setLevel(logging.DEBUG)
console_log_handler.setFormatter(formatter)
local_logger.addHandler(console_log_handler)
return local_logger
LOGGER = get_logger()
def enable_stdout():
"""
Prints logs in stdout
"""
global LOGGER # pylint: disable=global-statement
LOGGER = get_logger(stdout=True)
```
#### File: yogit/yogit/pullrequest.py
```python
import click
from yogit.api.queries import PullRequestListQuery, OrgaPullRequestListQuery
from yogit.yogit.checks import account_required, check_update
@click.group("pr")
def pull_request():
"""
Pull request actions
"""
@click.command("list", help="List your opened pull requests")
@click.option("--orga", type=click.STRING, help="Expand results to a specific organization")
@click.option(
"--label",
type=click.STRING,
multiple=True,
help="Only show pull requests having such label (several --label can be set)",
)
@click.pass_context
@account_required
@check_update
def pull_request_list(ctx, orga, label): # pylint: disable=unused-argument
"""
List pull requests
"""
labels = [x.lower() for x in label]
if orga:
query = OrgaPullRequestListQuery(labels, orga)
else:
query = PullRequestListQuery(labels)
query.execute() # pylint: disable=no-value-for-parameter
query.print()
pull_request.add_command(pull_request_list)
``` |
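A small invocation sketch, written the same way the test suite drives the CLI; the organization and label values are illustrative, and it assumes the `pr` group is registered on `cli.main` like the other commands.
```python
from click.testing import CliRunner
from yogit.yogit import cli

runner = CliRunner()
# Equivalent to: yogit pr list --orga my-org --label bug
result = runner.invoke(cli.main, ["pr", "list", "--orga", "my-org", "--label", "bug"])
print(result.exit_code, result.output)
```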
{
"source": "jmangs/prometheus-pve-exporter",
"score": 3
} |
#### File: src/pve_exporter/cli.py
```python
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default='9221',
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
    # Use explicitly supplied args when given; otherwise fall back to sys.argv.
    params = parser.parse_args(args if args is not None else sys.argv[1:])
start_http_server(params.config, params.port, params.address)
```
#### File: src/pve_exporter/collector.py
```python
import itertools
from proxmoxer import ProxmoxAPI
from prometheus_client import CollectorRegistry, generate_latest
from prometheus_client.core import GaugeMetricFamily
class StatusCollector(object):
"""
Collects Proxmox VE Node/VM/CT-Status
# HELP pve_up Node/VM/CT-Status is online/running
# TYPE pve_up gauge
pve_up{id="node/proxmox-host"} 1.0
pve_up{id="cluster/pvec"} 1.0
pve_up{id="lxc/101"} 1.0
pve_up{id="qemu/102"} 1.0
"""
def __init__(self, pve):
self._pve = pve
def collect(self): # pylint: disable=missing-docstring
status_metrics = GaugeMetricFamily(
'pve_up',
'Node/VM/CT-Status is online/running',
labels=['id'])
for entry in self._pve.cluster.status.get():
if entry['type'] == 'node':
label_values = [entry['id']]
status_metrics.add_metric(label_values, entry['online'])
elif entry['type'] == 'cluster':
label_values = ['cluster/{:s}'.format(entry['name'])]
status_metrics.add_metric(label_values, entry['quorate'])
else:
raise ValueError('Got unexpected status entry type {:s}'.format(entry['type']))
for resource in self._pve.cluster.resources.get(type='vm'):
label_values = [resource['id']]
status_metrics.add_metric(label_values, resource['status'] == 'running')
yield status_metrics
class VersionCollector(object):
"""
Collects Proxmox VE build information. E.g.:
# HELP pve_version_info Proxmox VE version info
# TYPE pve_version_info gauge
pve_version_info{release="15",repoid="7599e35a",version="4.4"} 1.0
"""
LABEL_WHITELIST = ['release', 'repoid', 'version']
def __init__(self, pve):
self._pve = pve
def collect(self): # pylint: disable=missing-docstring
version_items = self._pve.version.get().items()
version = {key: value for key, value in version_items if key in self.LABEL_WHITELIST}
labels, label_values = zip(*version.items())
metric = GaugeMetricFamily(
'pve_version_info',
'Proxmox VE version info',
labels=labels
)
metric.add_metric(label_values, 1)
yield metric
class ClusterNodeCollector(object):
"""
Collects Proxmox VE cluster node information. E.g.:
# HELP pve_node_info Node info
# TYPE pve_node_info gauge
pve_node_info{id="node/proxmox-host", ip="10.20.30.40", level="c",
local="1", name="proxmox-host", nodeid="0"} 1.0
"""
def __init__(self, pve):
self._pve = pve
def collect(self): # pylint: disable=missing-docstring
nodes = [entry for entry in self._pve.cluster.status.get() if entry['type'] == 'node']
if nodes:
            # Remove superfluous keys.
for node in nodes:
del node['type']
del node['online']
# Yield remaining data.
labels = nodes[0].keys()
info_metrics = GaugeMetricFamily(
'pve_node_info',
'Node info',
labels=labels)
for node in nodes:
label_values = [str(node[key]) for key in labels]
info_metrics.add_metric(label_values, 1)
yield info_metrics
class ClusterInfoCollector(object):
"""
Collects Proxmox VE cluster information. E.g.:
# HELP pve_cluster_info Cluster info
# TYPE pve_cluster_info gauge
pve_cluster_info{id="cluster/pvec",nodes="2",quorate="1",version="2"} 1.0
"""
def __init__(self, pve):
self._pve = pve
def collect(self): # pylint: disable=missing-docstring
clusters = [entry for entry in self._pve.cluster.status.get() if entry['type'] == 'cluster']
if clusters:
            # Remove superfluous keys.
for cluster in clusters:
del cluster['type']
# Add cluster-prefix to id.
for cluster in clusters:
cluster['id'] = 'cluster/{:s}'.format(cluster['name'])
del cluster['name']
# Yield remaining data.
labels = clusters[0].keys()
info_metrics = GaugeMetricFamily(
'pve_cluster_info',
'Cluster info',
labels=labels)
for cluster in clusters:
label_values = [str(cluster[key]) for key in labels]
info_metrics.add_metric(label_values, 1)
yield info_metrics
class ClusterResourcesCollector(object):
"""
Collects Proxmox VE cluster resources information, i.e. memory, storage, cpu
usage for cluster nodes and guests.
"""
def __init__(self, pve):
self._pve = pve
def collect(self): # pylint: disable=missing-docstring
metrics = {
'maxdisk': GaugeMetricFamily(
'pve_disk_size_bytes',
'Size of storage device',
labels=['id']),
'disk': GaugeMetricFamily(
'pve_disk_usage_bytes',
'Disk usage in bytes',
labels=['id']),
'maxmem': GaugeMetricFamily(
'pve_memory_size_bytes',
'Size of memory',
labels=['id']),
'mem': GaugeMetricFamily(
'pve_memory_usage_bytes',
'Memory usage in bytes',
labels=['id']),
'netout': GaugeMetricFamily(
'pve_network_transmit_bytes',
'Number of bytes transmitted over the network',
labels=['id']),
'netin': GaugeMetricFamily(
'pve_network_receive_bytes',
'Number of bytes received over the network',
labels=['id']),
'diskwrite': GaugeMetricFamily(
'pve_disk_write_bytes',
'Number of bytes written to storage',
labels=['id']),
'diskread': GaugeMetricFamily(
'pve_disk_read_bytes',
'Number of bytes read from storage',
labels=['id']),
'cpu': GaugeMetricFamily(
'pve_cpu_usage_ratio',
'CPU usage (value between 0.0 and pve_cpu_usage_limit)',
labels=['id']),
'maxcpu': GaugeMetricFamily(
'pve_cpu_usage_limit',
'Maximum allowed CPU usage',
labels=['id']),
'uptime': GaugeMetricFamily(
'pve_uptime_seconds',
'Number of seconds since the last boot',
labels=['id']),
}
info_metrics = {
'guest': GaugeMetricFamily(
'pve_guest_info',
'VM/CT info',
labels=['id', 'node', 'name', 'type']),
'storage': GaugeMetricFamily(
'pve_storage_info',
'Storage info',
labels=['id', 'node', 'storage']),
}
info_lookup = {
'lxc': {
'labels': ['id', 'node', 'name', 'type'],
'gauge': info_metrics['guest'],
},
'qemu': {
'labels': ['id', 'node', 'name', 'type'],
'gauge': info_metrics['guest'],
},
'storage': {
'labels': ['id', 'node', 'storage'],
'gauge': info_metrics['storage'],
},
}
for resource in self._pve.cluster.resources.get():
restype = resource['type']
if restype in info_lookup:
label_values = [resource[key] for key in info_lookup[restype]['labels']]
info_lookup[restype]['gauge'].add_metric(label_values, 1)
label_values = [resource['id']]
for key, metric_value in resource.items():
if key in metrics:
metrics[key].add_metric(label_values, metric_value)
return itertools.chain(metrics.values(), info_metrics.values())
def collect_pve(config, host):
"""Scrape a host and return prometheus text format for it"""
pve = ProxmoxAPI(host, **config)
registry = CollectorRegistry()
registry.register(StatusCollector(pve))
registry.register(ClusterResourcesCollector(pve))
registry.register(ClusterNodeCollector(pve))
registry.register(ClusterInfoCollector(pve))
registry.register(VersionCollector(pve))
return generate_latest(registry)
``` |
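A minimal sketch of calling `collect_pve` directly; the host name and credentials below are assumptions, and the config keyword arguments are simply forwarded to `ProxmoxAPI`.
```python
from pve_exporter.collector import collect_pve

config = {
    "user": "prometheus@pve",   # assumed account, not from the source
    "password": "secret",       # assumed password
    "verify_ssl": False,
}
# Returns the Prometheus text exposition format as bytes.
output = collect_pve(config, "pve.example.com")
print(output.decode("utf-8"))
```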
{
"source": "jmanly56/basicml",
"score": 4
} |
#### File: basicml/layers/input.py
```python
import activations
from typing import Tuple, Union
import numpy as np
import numpy.typing as npt
from layers import Layer
class Input(Layer):
"""
The input layer of the neural network. Derives from `Layer`.
The purpose of this layer is to make sure the input is correctly shaped.
"""
    def __init__(self) -> None:
        """Init the layer."""
        super().__init__(trainable=False)
        # Name mangling keeps Layer's __built private to Layer, so track the
        # build state here; otherwise __call__ raises AttributeError.
        self.__built = False
def build(self, input_shape: tuple):
"""
Build the layer with the given input shape.
Parameters:
input_shape: The shape to expect the inputs in.
"""
self.weights = np.empty(
input_shape) # The input layer will not have any weights.
self.__built = True
self.shape = input_shape
def __call__(self, inputs: npt.ArrayLike) -> Tuple[npt.ArrayLike, npt.ArrayLike]:
"""
Take the inputs, and make sure they are in the right shape and type.
Parameters:
inputs: A numpy array containing rows of inputs.
Returns:
The inputs.
"""
        # Validate the type before touching .shape, then build lazily.
        if not isinstance(inputs, np.ndarray):
            raise TypeError("Inputs should be a numpy array/matrix.")
        if not self.__built:
            self.build(inputs.shape)
        return inputs, []
def get_output_shape(self):
"""Return the output shape of the layer."""
pass
def get_weights(self) -> npt.ArrayLike:
"""Return the weights of the layer."""
return np.empty(self.shape)
def get_activation(self) -> Union[activations.Activation, None]:
"""Return the activation function of the layer."""
return super().get_activation()
def get_biases(self) -> npt.ArrayLike:
"""Return the biases of the layer."""
return np.empty(self.shape)
def _set_weights(self, weights: npt.ArrayLike):
"""Set the weights of the layer."""
pass
def _set_biases(self, biases: npt.ArrayLike):
"""Set the biases of the layer."""
pass
```
#### File: basicml/layers/layer.py
```python
from abc import ABC, abstractmethod
import activations
from typing import Tuple, Union
import numpy as np
import numpy.typing as npt
class Layer(ABC):
"""
The base class that all layers derive from.
All layers must implement the following functions:
build:
get_output_shape
get_weights
get_biases
get_activation
_set_weights
_set_biases
"""
def __init__(self, trainable=True) -> None:
"""Init the layer."""
super().__init__()
self.trainable = trainable
self.weights = None
self.__built = False
@abstractmethod
def build(self, input_shape):
"""
Initialize the weights for the layer based on input size and any other intialization that needs to be done.
This cannot be done until the input shape is known
"""
self.weights = np.zeros(shape=input_shape, dtype='float32')
self.__built = True
@abstractmethod
def get_output_shape(self) -> Tuple:
"""Return the output shape of the layer."""
pass
@abstractmethod
def get_weights(self) -> npt.ArrayLike:
"""Return the weights of the layer."""
        return np.empty(0)  # placeholder; subclasses override this
@abstractmethod
def get_biases(self) -> npt.ArrayLike:
"""Return the biases of the layer."""
        return np.empty(0)  # placeholder; subclasses override this
@abstractmethod
def get_activation(self) -> Union[activations.Activation, None]:
"""Return the activation of the layer."""
return None
@abstractmethod
def _set_weights(self, weights: npt.ArrayLike):
"""Set the weights of the layer."""
pass
@abstractmethod
def _set_biases(self, biases: npt.ArrayLike):
"""Set the biases of the layer."""
pass
```
#### File: basicml/losses/mse.py
```python
from losses.loss import Loss
import numpy as np
import numpy.typing as npt
from activations import Activation
class MSE(Loss):
"""Implements Mean Squared Error loss. Derives from `Loss`."""
def __call__(self, y_pred: npt.ArrayLike, y_true: npt.ArrayLike) -> npt.ArrayLike:
"""
Output the loss or cost calculated using y_pred and y_true.
Parameters:
y_pred: The predicted output of a layer.
y_true: The True output it should have given.
Return:
The loss of this particular training example.
"""
return 0.5 * np.linalg.norm(y_pred - y_true)**2
def _delta(self, z: npt.ArrayLike, y_pred: npt.ArrayLike,
y_true: npt.ArrayLike, activation: Activation = None) -> npt.ArrayLike:
"""Find the derivative of the loss function with respect to y_true and y_pred."""
if activation is not None:
z = activation.prime(z)
else:
z = 1.0
return np.transpose((y_pred - y_true) * np.transpose(z))
```
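A quick sketch of the loss on a toy prediction (values are illustrative; it assumes the `losses` package is importable as laid out above):
```python
import numpy as np
from losses.mse import MSE

loss_fn = MSE()
y_pred = np.array([0.9, 0.1])
y_true = np.array([1.0, 0.0])
# 0.5 * ||y_pred - y_true||^2 = 0.5 * (0.01 + 0.01) = 0.01
print(loss_fn(y_pred, y_true))
```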
#### File: basicml/optimizers/sgd.py
```python
from optimizers.optimizer import Optimizer
import numpy.typing as npt
import numpy as np
class SGD(Optimizer):
"""Implements SGD. Derives from `Optimizer`."""
def __init__(self, learning_rate) -> None:
"""Init the class and learning rate."""
super().__init__(learning_rate)
def apply_gradients(self, gradients: npt.ArrayLike, variables: npt.ArrayLike, batch_size: int) -> npt.ArrayLike:
"""
Apply the gradients to the variables of the model.
Parameters:
gradients: The gradients to adjust with.
variables: The model's variables (weights and biases).
batch_size: The size of each batch.
Returns:
The modified variables.
"""
variables = [w - ((self.lr / batch_size) * gw)
for w, gw in zip(variables, gradients)]
return np.array(variables)
``` |
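A one-step sketch with toy numbers; it assumes the `Optimizer` base class stores `learning_rate` as `self.lr`, which is what `apply_gradients` reads.
```python
import numpy as np
from optimizers.sgd import SGD

opt = SGD(learning_rate=0.1)
weights = np.array([[1.0, 2.0]])
grads = np.array([[0.5, -0.5]])
# w <- w - (lr / batch_size) * gw  ->  [[0.995, 2.005]] with batch_size=10
updated = opt.apply_gradients([grads], [weights], batch_size=10)
print(updated)
```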
{
"source": "jmann277/jers_ml_tools",
"score": 4
} |
#### File: jers_ml_tools/jerml/hyperparameters.py
```python
import pandas as pd
import numpy as np
def bagging_hyperparameters(
n_hyperparams,
random_seed=42,
n_estimators_support=(10**0, 10**4),
max_features_support=(.1, .9),
max_samples_support=(.9, 1)
):
'''
Generates a randomized cloud of hyperparameters for an
sklearn bagging classifier/regressor
There are three bagging hyperparameters:
- n_estimators: number of base estimators to be averaged
- number of base estimators to use
- increasing n_estimators decreases variance, mildly increases bias
- max_features: maximum number of features to give base esimator
- increases max_features increases variance, increases bias
- increases effective dimension of base estimators
- increases correlation between base estimators
- max_samples: max number of samples used to train each base estimator
- primarily used to decrease computational burden of training
- decreasing max_samples increases both variance and bias
Example
-------
>>> hyperparameters = bagging_hyperparameters(n_hyperparams=3, random_seed=42)
>>> hyperparameters.columns
['n_estimators', 'max_features', 'max_samples']
Parameters
----------
n_hyperparams : int
Number of hyperparameters to generate
    n_estimators_support : tuple of int, (min, max), default=(1, 10**4)
        min and max value of the n_estimators hyperparameter.
    max_features_support : tuple of float, (min, max), default=(.1, .9)
        min and max value of the max_features hyperparameter. More specifically,
        what percent of the features are shown to the weak learners.
    max_samples_support : tuple, (min_value, max_value), default=(.9, 1)
        min and max value of the max_samples hyperparameter. More specifically,
        what percent of the samples are shown to the weak learners.
    random_seed : int, default=42
        random state used to generate hyperparameters.
Returns
-------
hyperparameters : Dataframe, shape (n_hyperparams, 3)
        dataframe containing values of the parameters for a Bagging
Classifier/Regressor class:
- n_estimators
- max_features
- max_samples
'''
    np.random.seed(random_seed)  # seed the generator so the sample is reproducible
    n_estimatorses = np.random.randint(
        low=n_estimators_support[0],
        high=n_estimators_support[1],
        size=n_hyperparams
    )
max_featureses = np.random.uniform(
low=max_features_support[0],
high=max_features_support[1],
size=n_hyperparams
)
max_sampleses = np.random.uniform(
low=max_samples_support[0],
high=max_samples_support[1],
size=n_hyperparams
)
hyperparameter_data = {
'n_estimators': n_estimatorses,
'max_features': max_featureses,
'max_samples': max_sampleses
}
return pd.DataFrame(data=hyperparameter_data)
def decision_tree_hyperparameters(
n_hyperparams,
max_depth_support=(3, 10),
min_sample_split_support=(0, .1),
min_samples_leaf_support=(0, .1),
min_impurity_decrease_support=(0, .1),
criteria=['gini', 'entropy'],
random_seed=42,
):
'''
Generates a randomized cloud of hyperparameters for an
sklearn decision tree classifier/regressor
Hyperparameters:
- max_depth
- determines max_depth of tree
- increasing max_depth decreases bias, and increases variance
- min_sample_split
- minimum samples in a region required to make a split
- increasing min_sample_split decreases bias, and increases variance
- min_samples_leaf
- Ensures no leaf contains less that min_samples_leaf samples
- increasing min_samples_leaf decreases bias, and increases variance
- criterion
- determines how tree decides to create a new branch
- unclear how this affects bias-variance decomposition
- min_impurity_decrease
- ensures branch will only be created if it decreases the impurity by min_impurity_decrease
Example
-------
>>> hyperparameters = decision_tree_hyperparameters(n_hyperparams=3, random_seed=42)
>>> hyperparameters.columns
    ['max_depth', 'min_samples_split', 'min_samples_leaf',
     'min_impurity_decrease', 'criteria']
Parameters
----------
n_hyperparams : int
Number of hyperparameters to generate.
    max_depth_support : tuple of int, (min, max), default=(3, 10)
        min and max value of the maximum tree depth hyperparameter.
    min_sample_split_support : tuple of float, (min, max), default=(0, .1)
        min and max fraction of samples that must be present in a node
        to allow an additional split.
    min_samples_leaf_support : tuple, (min, max), default=(0, .1)
        min and max fraction of samples required to be in a leaf.
    criteria : list of str, default=['gini', 'entropy']
        Criteria to sample from when deciding how to create a new branch.
    min_impurity_decrease_support : tuple, (min, max), default=(0, .1)
        min and max value of the minimum decrease in impurity required
        for a split.
    random_seed : int, default=42
        random state used to generate hyperparameters.
Returns
-------
    hyperparameters : Dataframe, shape (n_hyperparams, 5)
        dataframe containing values of the parameters for a
        Decision Tree classifier/regressor:
- max_depth
- min_samples_split
- min_samples_leaf
- min_impurity_decrease
- criteria
'''
    np.random.seed(random_seed)  # seed the generator so the sample is reproducible
    max_depths = np.random.randint(
        low=max_depth_support[0],
        high=max_depth_support[1],
        size=n_hyperparams
    )
min_samples_splits = np.random.uniform(
low=min_sample_split_support[0],
high=min_sample_split_support[1],
size=n_hyperparams
)
min_samples_leaves = np.random.uniform(
low=min_samples_leaf_support[0],
high=min_samples_leaf_support[1],
size=n_hyperparams
)
min_impurity_decreases = np.random.uniform(
low=min_impurity_decrease_support[0],
high=min_impurity_decrease_support[1],
size=n_hyperparams
)
criteria_indices = np.random.randint(
low=0,
high=len(criteria),
size=n_hyperparams
)
criterias = np.array(criteria)[criteria_indices]
hyperparameter_data = {
'max_depth': max_depths,
'min_samples_split': min_samples_splits,
'min_samples_leaf': min_samples_leaves,
'min_impurity_decrease': min_impurity_decreases,
'criteria': criterias
}
return pd.DataFrame(data=hyperparameter_data)
```
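A small random-search sketch pairing the sampled hyperparameters with scikit-learn's `BaggingClassifier`; the iris dataset, support range, and cross-validation settings are illustrative choices, not part of the module.
```python
from jerml.hyperparameters import bagging_hyperparameters
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import cross_val_score

X, y = load_iris(return_X_y=True)
candidates = bagging_hyperparameters(n_hyperparams=5, n_estimators_support=(10, 50))
for _, row in candidates.iterrows():
    clf = BaggingClassifier(
        n_estimators=int(row["n_estimators"]),
        max_features=row["max_features"],
        max_samples=row["max_samples"],
    )
    score = cross_val_score(clf, X, y, cv=3).mean()
    print(row.to_dict(), round(score, 3))
```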
#### File: jers_ml_tools/jerml/transformers.py
```python
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA
from scipy.stats import kstat
from homcv import betti_numbers
class CumulantsExtractor(BaseEstimator, TransformerMixin):
'''Scikit-Learn transformer computing cumulants of the features.
Cumulants are universal numerical invariants of probability
distributions. Their interpretation is context dependent. For example,
if the input is an image, these cumulants may be conceptualized as
"textural" features.
Note that this transformer can only compute the first 4 cumulants.
Example
-------
>>> X = np.ones(shape = (1, 100))
    This distribution is entirely "deterministic", so we expect every cumulant
    of order higher than one to vanish and the first cumulant (the mean) to
    equal 1.
    >>> cumulants_extractor = CumulantsExtractor()
    >>> cumulants_extractor.transform(X)
    array([[1., 0., 0., 0.]])
Attributes
----------
highest_cumulant_ : int
highest cumultant to be computed by the transform method.
'''
def __init__(self, highest_cumulant_=4):
assert highest_cumulant_ <= 4, 'cannot compute cumulant higher than 4'
self.highest_cumulant_ = highest_cumulant_
def fit(self, X, y=None):
'''Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines.
'''
return self
def _get_cumulants(self, v):
kstats = np.array([kstat(data=v, n=k)
for k in range(1, self.highest_cumulant_ + 1)])
return kstats
def transform(self, X, y=None):
'''
Computes cumulants of features less than the specified highest cumulant
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
cumulants: ndarray, shape = (n_samples, highest_cumulant)
cumulants of the empirical distribution determine by data
along axis=1
'''
cumulants = np.apply_along_axis(
func1d=self._get_cumulants,
axis=1,
arr=X,
)
return cumulants
class GrayScaler(BaseEstimator, TransformerMixin):
'''Transforms a color image into grayscale.
Transforms a batch color images into a batch of grayscale images
using 1-component PCA.
'''
def __init__(self):
self.pca = PCA(n_components=1)
def _flatten(self, X):
'''
Flattens the image so that it can be transformed into a form
PCA can transform
'''
assert X.ndim == 4, "batch must be 4 dimensional"
n_color_channels = X.shape[-1]
X_flat = X.reshape(-1, n_color_channels)
return X_flat
def _unflatten(self, X_grayscale_flat, n_samples, image_dimensions):
''' Unflattens image, making it have shape (n_samples, n_x, n_y) '''
X_unflat = X_grayscale_flat.reshape(n_samples,
image_dimensions[0],
image_dimensions[1])
return X_unflat
def fit(self, X, y=None):
'''
Fits a 1-component PCA on the distributions of colors of all the
pixels in the entire batch of images.
'''
X_flat = self._flatten(X)
self.pca.fit(X_flat)
return self
def transform(self, X, y=None):
'''
Finds a gray-scale approximation to a batch of images
using 1-component PCA in color space.
Parameters
----------
X: ndarray, shape (n_samples, x_dim, y_dim, n_color_channels)
Array of n_samples images, of size (x_dim, y_dim) with
n_color_channels
Returns
-------
X_grayscaled: ndarray, shape (n_samples, x_dim, y_dim)
Array of n_samples grayscale images of the same size as the
input X.
'''
image_dimensions = (X.shape[1], X.shape[2])
n_samples = X.shape[0]
X_flat = self._flatten(X)
X_grayscale_flat = self.pca.transform(X_flat)
X_grayscaled = self._unflatten(
X_grayscale_flat,
n_samples,
image_dimensions
)
return X_grayscaled
class Reshaper(BaseEstimator, TransformerMixin):
''' Reshapes a 2d array into a ndarray of a specified shape.
Attributes
----------
output_shape_ : tuple of int
shape of the output array
'''
def __init__(self, output_shape_):
self.output_shape_ = output_shape_
def fit(self, X, y=None):
'''Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence work in pipelines.
'''
assert X.shape[1] == np.prod(np.array(self.output_shape_)), ('output '
'size does not match input size')
return self
def transform(self, X, y=None):
''' Reshapes the array
Parameters
----------
X : ndarray, shape (n_samples, input_dim)
input data to be transformed
Returns
-------
X_reshaped: ndarray, shape (n_samples,) + self.output_shape
Reshaped array
'''
X_transformed_shape = (X.shape[0],) + self.output_shape_
return X.reshape(X_transformed_shape)
class Bettier(BaseEstimator, TransformerMixin):
'''Computes the Betti Numbers of the dark regions of a batch of images
Attributes
----------
threshold_ : float, optional
The transform method computes the Betti numbers of the region
formed by any pixel darker than `threshold`.
'''
def __init__(self, threshold_=.5):
self.threshold_ = threshold_
def fit(self, X, y=None):
'''Do nothing and return the estimator unchanged
This method is just there to implement the usual API
and hence work in pipelines.
'''
return self
def transform(self, X, y=None):
'''
Returns the betti numbers of the dark region of the images.
Parameters
----------
X : ndarray, shape (n_samples, n_x, n_y)
Batch of grayscale images.
Returns
-------
        X_transformed : ndarray, shape (n_samples, 2)
            Zeroth and first Betti numbers of each image in the batch
'''
betti_numbers_list = [
betti_numbers(X[i, :, :], self.threshold_)[None,:]
for i in range(X.shape[0])
]
X_transformed = np.concatenate(betti_numbers_list, axis=0)
return X_transformed
``` |
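A pipeline sketch chaining two of the transformers on random images; the batch shape and threshold are illustrative, and `homcv` must be installed for `Bettier` to work.
```python
import numpy as np
from sklearn.pipeline import Pipeline
from jerml.transformers import GrayScaler, Bettier

X = np.random.rand(8, 28, 28, 3)  # 8 RGB images of size 28x28
pipe = Pipeline([
    ("gray", GrayScaler()),              # color -> 1-component PCA grayscale
    ("betti", Bettier(threshold_=0.5)),  # Betti numbers of the dark regions
])
features = pipe.fit_transform(X)
print(features.shape)  # (8, 2)
```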
{
"source": "jmann277/oura_cdm",
"score": 2
} |
#### File: oura_cdm/oura_cdm/extract_oura.py
```python
import json
import os
from datetime import date
from typing import Dict, List, Optional
from functools import partial
import requests
from oura_cdm.logs import log_info, log_warning
log_info_e = partial(log_info, **{'name': __name__})
log_warning_e = partial(log_warning, **{'name': __name__})
def get_oura_token_environment_variable_name() -> str:
return 'OURA_TOKEN'
def get_token() -> Optional[str]:
token_name = get_oura_token_environment_variable_name()
if token_name in os.environ.keys():
return os.environ['OURA_TOKEN']
return None
def _get_mock_oura_data() -> List[Dict]:
mock_data_folder = 'mocks/mock_sleep_data.json'
log_info_e(f'Getting mock data from {mock_data_folder}')
with open(mock_data_folder, 'rb') as f:
ans = json.load(f)
return ans
def get_oura_data(
start_date=None,
end_date=None
) -> List[Dict]:
token = get_token()
if token is None:
log_warning_e('No token provided running pipeline on mock data')
return _get_mock_oura_data()
if end_date is None:
end_date = date.today().strftime("%Y-%m-%d")
if start_date is None:
start_date = '2000-02-01'
log_info_e(f'Getting data from oura from {start_date} to {end_date}')
url = f'https://api.ouraring.com/v1/sleep?start={start_date}&end={end_date}'
ans = requests.get(url, params={"access_token": token})
if ans.status_code == 401:
        log_warning_e('Oura token rejected (401); returning no sleep data')
return _get_mock_oura_data()[:0]
ans = ans.json()
log_info_e('Oura query successful')
return ans['sleep']
```
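A usage sketch: without `OURA_TOKEN` in the environment the call falls back to the bundled mock data, otherwise it queries the Oura API for the given window (the dates below match the ones used in the tests).
```python
from oura_cdm.extract_oura import get_oura_data

sleep_records = get_oura_data(start_date="2022-01-17", end_date="2022-02-02")
for record in sleep_records[:3]:
    print(record["summary_date"], record.get("rem"), record.get("deep"))
```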
#### File: oura_cdm/tests/test_extract.py
```python
import os
import pytest
from oura_cdm.extract_oura import get_oura_data
@pytest.fixture(scope='session')
def observation_dates(oura_data):
return [
observations['summary_date']
for observations in oura_data
]
def test_oura_data_keys(oura_data):
assert {"light", "rem", "deep"}.issubset(set(oura_data[0].keys()))
def test_start_date(observation_dates, start_date):
for date in observation_dates:
assert start_date <= date
def test_end_date(observation_dates, end_date):
for date in observation_dates:
assert end_date >= date
@pytest.fixture(params=['asdfjkl'])
def invalid_token_env(request, monkeypatch):
token = request.param
if token is not None:
monkeypatch.setenv('OURA_TOKEN', token)
yield token
def test_all_env_variables(invalid_token_env):
data = get_oura_data(start_date='2022-01-17', end_date='2022-02-02')
if invalid_token_env is not None:
assert os.environ['OURA_TOKEN'] == invalid_token_env
assert len(data) == 0
``` |
{
"source": "jmann277/polar_h10_analysis_tools",
"score": 3
} |
#### File: polar_h10_analysis_tools/scr/wranglers.py
```python
import numpy as np
import pandas as pd
import os
class polar_h10_running_wrangler:
'''
Wrangles running data
    Note that it treats any pace slower than the pace threshold
    (75 min/mi by default) as standing still.
'''
def __init__(self, filepath):
self.filepath = filepath
self.meta_df = self.wrangle_meta_df()
self.data_df = self.wrangle_data_df()
def wrangle_meta_df(self):
"""
Extracts and wrangle session metadata
"""
meta_df = pd.read_csv(self.filepath)[:1]
meta_df.dropna(axis=1, inplace=True)
meta_df['Date'] = pd.to_datetime(meta_df['Date'], format='%d-%m-%Y')
meta_df['Start time'] = pd.to_datetime(
meta_df['Start time'], infer_datetime_format=True)
meta_df['Duration'] = pd.to_timedelta(meta_df['Duration'])
meta_df.drop(columns=['Date'], inplace=True)
renaming_dict = {'Start time': 'Start Datetime'}
meta_df.rename(columns=renaming_dict, inplace=True)
meta_df.loc[0, 'Sport'] = meta_df.loc[0, 'Sport'].title()
meta_df.loc[0, 'Name'] = meta_df.loc[0, 'Name'].title()
return meta_df
def wrangle_data_df(self, pace_threshold=75):
'''
Extracts and wrangles the session data
'''
data_df = pd.read_csv(self.filepath, header=2)
data_df.dropna(axis=1, inplace=True)
data_df['Pace (min/mi)'] = '00:' + data_df['Pace (min/mi)']
data_df['Pace (min/mi)'] = pd.to_timedelta(
data_df['Pace (min/mi)']
).dt.total_seconds() / 60
data_df['Pace (min/mi)'] = np.round(
data_df['Pace (min/mi)'], decimals=1
)
data_df[data_df['Pace (min/mi)'] > pace_threshold] = 0
data = np.full(shape=data_df.index.shape,
fill_value=self.get_start_datetime())
start_datetime_series = pd.Series(data=data, index=data_df.index)
data_df['Time'] = pd.to_timedelta(
data_df['Time']) + start_datetime_series
data_df.set_index('Time', inplace=True)
return data_df
def get_activity(self):
activity = self.meta_df.loc[0, 'Sport'].lower()
return activity
def get_name(self):
name = self.meta_df.loc[0, 'Name'].replace(' ', '_').lower()
return name
def get_start_datetime(self):
start_datetime = self.meta_df.loc[0, 'Start Datetime']
return start_datetime
def save_wrangled_data(self):
'''
Saves the session data. Format is:
<date>_<start_time>_<activity>_<last_name>_<first_name>
'''
start_dt_str = self.get_start_datetime().strftime('%Y-%m-%d_%H:%M')
activity = self.get_activity()
name = self.get_name()
save_filename = '{}_{}_{}.csv'.format(start_dt_str, activity, name)
filepath = os.path.join('..', 'data', 'wrangled_data', save_filename)
self.data_df.to_csv(filepath)
``` |
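A minimal sketch of wrangling one exported Polar session; the import path and CSV location are assumptions about the repository layout, and `save_wrangled_data` writes into `../data/wrangled_data/` relative to the working directory.
```python
from scr.wranglers import polar_h10_running_wrangler

# Hypothetical export from Polar Flow; the path is illustrative.
session = polar_h10_running_wrangler("../data/raw_data/example_session.csv")
print(session.meta_df)
print(session.data_df.head())
session.save_wrangled_data()
```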
{
"source": "jmannau/micropython-stubber",
"score": 2
} |
#### File: stubs/esp32_1_10_0/network.py
```python
"Module 'network' on firmware 'v1.10-247-g0fb15fc3f on 2019-03-29'"
AP_IF = 1
AUTH_MAX = 6
AUTH_OPEN = 0
AUTH_WEP = 1
AUTH_WPA2_PSK = 3
AUTH_WPA_PSK = 2
AUTH_WPA_WPA2_PSK = 4
ETH_CLOCK_GPIO0_IN = 0
ETH_CLOCK_GPIO16_OUT = 2
ETH_CLOCK_GPIO17_OUT = 3
def LAN():
pass
MODE_11B = 1
MODE_11G = 2
MODE_11N = 4
PHY_LAN8720 = 0
PHY_TLK110 = 1
def PPP():
pass
STAT_ASSOC_FAIL = 203
STAT_BEACON_TIMEOUT = 200
STAT_CONNECTING = 1001
STAT_GOT_IP = 1010
STAT_HANDSHAKE_TIMEOUT = 204
STAT_IDLE = 1000
STAT_NO_AP_FOUND = 201
STAT_WRONG_PASSWORD = <PASSWORD>
STA_IF = 0
def WLAN():
pass
def phy_mode():
pass
```
#### File: stubs/esp32_1_10_0/upip_utarfile.py
```python
"Module 'upip_utarfile' on firmware 'v1.10-247-g0fb15fc3f on 2019-03-29'"
DIRTYPE = 'dir'
class FileSection(): ...
def read():
pass
def readinto():
pass
def skip():
pass
REGTYPE = 'file'
TAR_HEADER = None
class TarFile(): ...
def extractfile():
pass
def next():
pass
class TarInfo(): ...
def roundup():
pass
uctypes = None
```
#### File: stubs/esp32_1_10_0/websocket_helper.py
```python
"Module 'websocket_helper' on firmware 'v1.10-247-g0fb15fc3f on 2019-03-29'"
DEBUG = 0
binascii = None
def client_handshake():
pass
hashlib = None
def server_handshake():
pass
sys = None
``` |
{
"source": "jmanosu/OS_Projects",
"score": 4
} |
#### File: OS_Projects/Learning_Python/mypython.py
```python
import random
def main():
#opens file 1 to write to and clears contents using trunk
f1 = open("file1", "w")
f1.truncate()
#generates a random string and writes it to file and prints it
temp = generateRanString()
f1.write(temp)
print(temp, end ="")
#same operations as file 1
f2 = open("file2", "w")
f2.truncate()
temp = generateRanString()
f2.write(temp)
print(temp, end ="")
#same operations as file 1
f3 = open("file3", "w")
f3.truncate()
temp = generateRanString()
f3.write(temp)
print(temp, end ="")
#generates two random INT's and prints them and their product
rInt1 = random.randint(1,42)
rInt2 = random.randint(1,42)
print(rInt1)
print(rInt2)
print(rInt1 * rInt2)
#random string generator
def generateRanString():
#output string
output = ""
for i in range(10):
        #generates a random number between 97 and 122
        rInt = random.randint(97,122)
        #converts the random number to its ascii character (97 = 'a', 122 = 'z')
output += chr(rInt)
#returns random string and adds new line character
return output + '\n'
#runs main function
main()
``` |
{
"source": "jmansilla-2014056/tytus",
"score": 2
} |
#### File: fase2/team24/ayuda.py
```python
import grammar2 as g
from reportTable import *
from InstruccionesDGA import tabla as ts
#print(g.funciones)
f = open("entrada.txt", "r")
a = open("c3d.py", "w")
a.write('''
from datetime import date
from variables import tabla as ts
from variables import NombreDB
from variables import cont
import tablaDGA as TAS
import sql as sql
import mathtrig as mt
from reportTable import *
pila = []
for i in range(100):
pila.append(i)
def ejecutar():
\tglobal cont
\tglobal ts
\tNombreDB = ts.nameDB
\n''')
input = f.read()
raiz = g.parse(input)
results = []
res =''
#executeGraphTree(raiz)
for val in raiz:
res += val.traducir()
#pass
a.write(res)
a.write('\tgraphTable(ts)\n')
for fa in g.funciones:
a.write(fa)
a.write('''ejecutar()''')
a.close()
```
#### File: fase2/team24/InstruccionesDGA.py
```python
import storage as func
import tablaDGA as TS
import reportError as errores
import mathtrig as mt
import hashlib
from datetime import date
from reportTable import *
from variables import cont
from variables import tabla
from variables import NombreDB
from procedural import llamadaF
#VARIABLES GLOBALES
resultadotxt = ""
contambito = 0
contregistro = 0
def Textoresultado():
for simbolo in tabla.simbolos:
print("ID: " + str(tabla.simbolos[simbolo].id) + " Nombre: " + tabla.simbolos[simbolo].nombre + " Ambito: " + str(tabla.simbolos[simbolo].ambito) + " Tipo indice: " + str(tabla.simbolos[simbolo].tipoind) + " Orden Indice: " + str(tabla.simbolos[simbolo].ordenind) + " Columna ind: " + str(tabla.simbolos[simbolo].columnaind) + " Tabla indice: " + str(tabla.simbolos[simbolo].tablaind))
print("\n")
class instruccion:
"""INSTRUCCION"""
"""RODUCCIONES GENERALES"""
class cond(instruccion):
def __init__(self,iden, signo,tipo):
self.iden = iden
self.signo = signo
self.tipo = tipo
class wherecond(instruccion):
def __init__(self,iden, tipo, tipo2):
self.iden = iden
self.tipo = tipo
self.tipo2 = tipo2
class wherecond1(instruccion):
def __init__(self,iden, tipo,signo):
self.iden = iden
self.tipo = tipo
self.signo = signo
class reservadatipo(instruccion):
def __init__(self,restipo,cantn):
self.restipo = restipo
self.cantn = cantn
"""MANIPULACION DE BASES DE DATOS"""
#CREATEDB----------------------------
class createdb(instruccion):
def __init__(self,replacedb,ifnotexists,iden,owner,mode):
self.replacedb = replacedb
self.ifnotexists = ifnotexists
self.iden = iden
self.owner = owner
self.mode = mode
def traducir(self):
#global traduccion
traduccion = '\t'
traduccion += 'sql.execute("CREATE DATABASE'
if self.ifnotexists != "":
traduccion += ' IF NOT EXISTS'
traduccion += ' '+self.iden
if self.owner != "":
traduccion += ' OWNER =' + self.owner
if self.mode != "":
traduccion += ' MODE =' + self.mode
traduccion += ';")'
return traduccion + '\n'
def ejecutar(self):
global resultadotxt
global cont
global tabla
global contambito
try:
resultado = func.createDatabase(self.iden)
if resultado == 0:
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.DATABASE,contambito)
cont+=1
contambito += 1
tabla.agregar(NuevoSimbolo)
print("2 luego de ejecutar en DGA",id(tabla))
#resultadotxt += "Se creo la base de datos " + self.iden + "\n"
print("Se creo la base de datos " + self.iden + "\n")
return "Se creo la base de datos " + self.iden + "\n"
elif resultado == 2 and not self.replacedb:
e = errores.CError(0,0,"Ya existe la base de datos " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "Ya existe la base de datos " + self.iden + "\n"
print("Ya existe la base de datos " + self.iden + "\n")
return "Ya existe la base de datos " + self.iden + "\n"
elif resultado == 2 and self.replacedb:
func.dropDatabase(self.iden)
buscar = tabla.BuscarNombre(self.iden)
tabla.simbolos.pop(buscar.id)
func.createDatabase(self.iden)
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.DATABASE,contambito)
cont+=1
contambito+=1
tabla.agregar(NuevoSimbolo)
resultadotxt += "Se reemplazo la base de datos: " + self.iden + "\n"
print("Se reemplazo la base de datos: " + self.iden + "\n")
return "Se reemplazo la base de datos: " + self.iden + "\n"
else:
e = errores.CError(0,0,"Error al crear base de datos: " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "Error al crear base de datos: " + self.iden + "\n"
print("Error al crear base de datos: " + self.iden + "\n")
return "Error al crear base de datos: " + self.iden + "\n"
except:
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.DATABASE,contambito)
cont+=1
contambito += 1
tabla.agregar(NuevoSimbolo)
print("2 luego de ejecutar en DGA",id(tabla))
"""ERROR SEMANTICO"""
#SHOWDB----------------------------------
class showdb(instruccion):
def __init__(self,nombre):
self.nombre = nombre
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("SHOW DATABASES '+ self.nombre + ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
contador = 0
try:
resultado = func.showDatabases()
if len(resultado) > 0:
resultadotxt += "\nBases de datos existentes:\n"
resp =""
resp += "\nBases de datos existentes:\n"
for base in resultado:
resultadotxt += str(contador) + ". " + base + "\n"
resp += str(contador) + ". " + base + "\n"
contador += 1
print(resp)
return resp
else:
resultadotxt += "No existen bases de datos"
print("No existen bases de datos")
return "No existen bases de datos"
except:
"""ERROR SEMANTICO"""
#ALTERDB------------------------------------
class alterdb(instruccion):
def __init__(self,alterdb2):
self.alterdb2 = alterdb2
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("ALTER DATABASE'
if self.alterdb2 != None:
traduccion += ' ' + self.alterdb2.iden
if self.alterdb2.alterdb3 != None:
traduccion += ' RENAME TO ' + self.alterdb2.alterdb3.iden
traduccion += ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
try:
if self.alterdb2.iden != "" and self.alterdb2.alterdb3.iden != "":
resultado = func.alterDatabase(self.alterdb2.iden, self.alterdb2.alterdb3.iden)
if resultado == 2:
e = errores.CError(0,0,"No existe la base de datos " + self.alterdb2.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + self.alterdb2.iden + "\n"
print("No existe la base de datos " + self.alterdb2.iden + "\n")
return "No existe la base de datos " + self.alterdb2.iden + "\n"
if resultado == 3:
e = errores.CError(0,0,"Ya existe la base de datos " + self.alterdb2.alterdb3.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "Ya existe la base de datos " + self.alterdb2.alterdb3.iden + "\n"
print("Ya existe la base de datos " + self.alterdb2.alterdb3.iden + "\n")
return "Ya existe la base de datos " + self.alterdb2.alterdb3.iden + "\n"
else:
buscar = tabla.BuscarNombre(self.alterdb2.iden)
buscar.nombre = self.alterdb2.alterdb3.iden
tabla.actualizar(buscar)
resultadotxt += "Se actualizo la base de datos " + self.alterdb2.iden + " a " + self.alterdb2.alterdb3.iden + "\n"
print("Se actualizo la base de datos " + self.alterdb2.iden + " a " + self.alterdb2.alterdb3.iden + "\n")
return "Se actualizo la base de datos " + self.alterdb2.iden + " a " + self.alterdb2.alterdb3.iden + "\n"
except:
"""ERROR SEMANTICO"""
class alterdb2(instruccion):
def __init__(self,iden, alterdb3):
self.iden = iden
self.alterdb3 = alterdb3
class alterdb21(instruccion):
def __init__(self,iden):
self.iden = iden
class alterdb3(instruccion):
def __init__(self,iden):
self.iden = iden
class alterdb31(instruccion):
def __init__(self,iden, iden2, iden3):
self.iden = iden
self.iden2 = iden2
self.iden3 = iden3
#DROPDB--------------------------------------
class dropdb(instruccion):
def __init__(self,ifexists, iden):
self.ifexists = ifexists
self.iden =iden
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("DROP DATABASE'
if self.ifexists != "":
traduccion += ' IF EXISTS'
traduccion += ' ' + self.iden
traduccion += ';)"'
traduccion += '\n'
return traduccion
def ejecutar(self):
global NombreDB
global resultadotxt
global cont
global tabla
try:
resultado = func.dropDatabase(self.iden)
if(resultado == 2):
e = errores.CError(0,0,"No existe la base de datos " + str(self.iden),'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + self.iden + "\n"
print("No existe la base de datos " + self.iden + "\n")
return "No existe la base de datos " + self.iden + "\n"
else:
BaseDatos = tabla.BuscarNombre(self.iden)
eliminar = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == BaseDatos.id and not tabla.simbolos[simbolo].tipo == TS.TIPO.DATABASE:
TablaExistente = tabla.simbolos[simbolo]
eliminar.append(TablaExistente)
for simbolo2 in tabla.simbolos:
if tabla.simbolos[simbolo2].ambito == TablaExistente.id and not tabla.simbolos[simbolo2].tipo == TS.TIPO.DATABASE and not tabla.simbolos[simbolo2].tipo == TS.TIPO.TABLE:
eliminar.append(tabla.simbolos[simbolo2])
for element in eliminar:
tabla.simbolos.pop(element.id)
tabla.simbolos.pop(BaseDatos.id)
if self.iden == NombreDB:
NombreDB = ""
resultadotxt += "Se elimino la base de datos " + self.iden + "\n"
print("Se elimino la base de datos " + self.iden + "\n")
return "Se elimino la base de datos " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
#USEDB----------------------------------------
class usedb(instruccion):
def __init__(self, iden):
self.iden =iden
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("USE DATABASE '+ self.iden
traduccion += ';")'
traduccion += '\n'
traduccion += '\tNombreDB = ts.nameDB\n'
return traduccion
def ejecutar(self):
global resultadotxt
global NombreDB
global tabla
tabla.nameDB = self.iden
NombreDB = self.iden
resultadotxt += "Usando la base de datos " + self.iden + "\n"
print("Usando la base de datos " + self.iden + "\n")
return "Usando la base de datos " + self.iden + "\n"
#MANIPULACION DE TABLAS
#CREATE TABLE---------------------------------------
class createtb(instruccion):
def __init__(self,iden, coltb, inherits):
self.iden = iden
self.coltb = coltb
self.inherits = inherits
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("CREATE TABLE '+ self.iden +'('
for column in self.coltb:
if isinstance(column, columna):
if isinstance(column.tipo, str):
traduccion += column.iden + ' ' + column.tipo
elif isinstance(column.tipo, reservadatipo):
traduccion += column.iden + ' ' + str(column.tipo.restipo)
if (column.tipo.cantn is not None):
traduccion += '(' + str(column.tipo.cantn) +')'
if column.notnull != "":
traduccion += ' ' + 'NOT NULL'
if str('PRIMARY KEY').lower() in str(column.key).lower():
traduccion += ' ' + 'PRIMARY KEY'
traduccion += ','
traduccion += ');")'
traduccion = traduccion.replace(',)',')')
traduccion += '\n'
#self.ejecutar()
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
try:
resultado = func.createTable(NombreDB, self.iden,0)
if(resultado == 2):
e = errores.CError(0,0,"No existe la base de datos: " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos: " + NombreDB + "\n"
elif(resultado == 3):
e = errores.CError(0,0,"La tabla ya existe: " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "La tabla ya existe: " + self.iden + "\n"
else:
buscar = tabla.BuscarNombre(NombreDB)
NuevoSimbolo = TS.Simbolo(cont,self.iden,TS.TIPO.TABLE,buscar.id,0)
cont+=1
tabla.agregar(NuevoSimbolo)
"""SE CREAN LAS COLUMNAS PARA LA TABLA"""
inicio = 0
for columna in self.coltb:
try:
if "primary key " in columna.key.lower():
NuevaColumna = TS.Simbolo(cont,columna.iden,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipo,1,columna.references,columna.default,False,columna.constraint,inicio)
listacol = []
listacol.append(NuevaColumna.numcol)
resultado2 = func.alterAddColumn(NombreDB,self.iden,columna)
resultado = func.alterAddPK(NombreDB,NuevoSimbolo.nombre,listacol)
else:
NuevaColumna = TS.Simbolo(cont,columna.iden,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipo,0,columna.references,columna.default,False,columna.constraint,inicio)
resultado = func.alterAddColumn(NombreDB,self.iden,columna)
if resultado == 2:
e = errores.CError(0,0,"No existe la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
e = errores.CError(0,0,"No existe la tabla " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la tabla " + self.iden + "\n"
elif resultado == 4:
e = errores.CError(0,0,"Ya existe una llave primaria en " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "Ya existe una llave primaria en " + self.iden + "\n"
else:
if columna.notnull.lower() == "not null":
NuevaColumna.nullcol = True
else:
NuevaColumna.nullcol = False
cont+=1
inicio+=1
NuevoSimbolo.coltab+=1
tabla.actualizar(NuevoSimbolo)
tabla.agregar(NuevaColumna)
resultadotxt += "Se agrego la columna " + columna.iden + " a la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
if self.inherits != "":
TablaInherits = tabla.BuscarNombre(self.inherits)
if TablaInherits:
ColumnasInherits = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaInherits.id and tabla.simbolos[simbolo].tipo == TS.TIPO.COLUMN :
ColumnasInherits.append(tabla.simbolos[simbolo])
#AGREGAR COLUMNAS DE INHERITS A TABLA
for columna in ColumnasInherits:
try:
if columna.llavecol == 1:
NuevaColumna = TS.Simbolo(cont,columna.nombre,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipocol,columna.llavecol,columna.refcol,columna.defcol,columna.nullcol,columna.constcol,inicio)
listacol = []
listacol.append(NuevaColumna.numcol)
resultado2 = func.alterAddColumn(NombreDB,self.iden,columna)
resultado = func.alterAddPK(NombreDB,NuevoSimbolo.nombre,listacol)
else:
NuevaColumna = TS.Simbolo(cont,columna.nombre,TS.TIPO.COLUMN,NuevoSimbolo.id,0,columna.tipocol,columna.llavecol,columna.refcol,columna.defcol,columna.nullcol,columna.constcol,inicio)
resultado = func.alterAddColumn(NombreDB,self.iden,columna)
if resultado == 2:
e = errores.CError(0,0,"No existe la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
e = errores.CError(0,0,"No existe la tabla " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la tabla " + self.iden + "\n"
elif resultado == 4:
e = errores.CError(0,0,"Ya existe una llave primaria en " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "Ya existe una llave primaria en " + self.iden + "\n"
else:
cont+=1
inicio+=1
NuevoSimbolo.coltab+=1
tabla.actualizar(NuevoSimbolo)
tabla.agregar(NuevaColumna)
resultadotxt += "Se agrego la columna " + columna.nombre + " a la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
else:
e = errores.CError(0,0,"No existe la tabla " + self.inherits,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la tabla " + self.inherits + "\n"
resultadotxt += "Se creo la tabla: " + self.iden + " En la base de datos: " + NombreDB + "\n"
except:
"""ERROR SEMANTICO"""
print(resultadotxt)
return resultadotxt
class columna(instruccion):
def __init__(self,iden, tipo, notnull, key, references, default, constraint):
self.iden = iden
self.tipo = tipo
self.notnull = notnull
self.key = key
self.references = references
self.default = default
self.constraint = constraint
#DROP TABLE--------------------------------------
class droptb(instruccion):
def __init__(self,iden):
self.iden = iden
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("DROP TABLE '+ self.iden + ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
try:
resultado = func.dropTable(NombreDB, self.iden)
if(resultado == 2):
e = errores.CError(0,0,"No existe la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif(resultado == 3):
e = errores.CError(0,0,"La tabla " + self.iden + " no existe en " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "La tabla " + self.iden + " no existe en " + NombreDB + "\n"
else:
buscar = tabla.BuscarNombre(self.iden)
eliminar = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == buscar.id and not tabla.simbolos[simbolo].tipo == TS.TIPO.DATABASE and not tabla.simbolos[simbolo].tipo == TS.TIPO.TABLE:
eliminar.append(tabla.simbolos[simbolo])
for element in eliminar:
tabla.simbolos.pop(element.id)
tabla.simbolos.pop(buscar.id)
resultadotxt += "Se elimino la tabla: " + self.iden + " de la base de datos: " + NombreDB + "\n"
except:
"""ERROR SEMANTICO"""
print(resultadotxt)
return resultadotxt
#ALTER TABLE-------------------------------------
class altertb(instruccion):
def __init__(self,iden, altertb2):
self.iden = iden
self.altertb2 = altertb2
def traducir(self):
traduccion = ''
for alteracion in self.altertb2:
subtraduccion = '\t' + 'sql.execute("ALTER TABLE '+ self.iden + ' '
#Este es un Add
if isinstance(alteracion, alteracion11):
subtraduccion += ' ' + alteracion.texto + ' '
if isinstance(alteracion.addprop, addprop):
temp = alteracion.addprop
subtraduccion += temp.texto
if isinstance(temp.lista, columna):
temp2 = temp.lista
subtraduccion += ' ' + temp2.iden + ' '
if isinstance(temp2.tipo, str):
subtraduccion += ' ' + temp2.tipo + ' '
elif isinstance(temp2.tipo, reservadatipo):
subtraduccion += temp2.iden + ' ' + str(temp2.tipo.restipo)
if (temp2.tipo.cantn is not None):
subtraduccion += '(' + str(temp2.tipo.cantn) +')'
subtraduccion += ';")'
subtraduccion += '\n'
traduccion += subtraduccion
#Este es un drop
if isinstance(alteracion, alteracion1):
subtraduccion = '\t' + 'sql.execute("ALTER TABLE '+ self.iden + ' '
subtraduccion += ' ' + alteracion.texto + ' ' + alteracion.iden + ' '
subtraduccion += ';")'
subtraduccion += '\n'
traduccion += subtraduccion
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
for alteracion in self.altertb2:
try:
if alteracion.texto and alteracion.texto.lower() == "add":
if alteracion.addprop.texto and alteracion.addprop.texto.lower() == "column":
NuevaColumna = alteracion.addprop.lista
try:
resultado = func.alterAddColumn(NombreDB,self.iden,NuevaColumna.iden)
if resultado == 2:
e = errores.CError(0,0,"No existe la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
e = errores.CError(0,0,"No existe la tabla " + self.iden,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la tabla " + self.iden + "\n"
else:
BuscarTabla = tabla.BuscarNombre(self.iden)
BuscarTabla.coltab+=1
tabla.actualizar(BuscarTabla)
NuevoSimboloColumna = TS.Simbolo(cont,NuevaColumna.iden,TS.TIPO.COLUMN,BuscarTabla.id,0,NuevaColumna.tipo,0,"","",False,"",(BuscarTabla.coltab-1))
cont+=1
tabla.agregar(NuevoSimboloColumna)
resultadotxt += "Se agrego la columna " + NuevoSimboloColumna.nombre + " a la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
if alteracion.texto and alteracion.texto.lower() == "drop column":
try:
ColumnaABorrar = tabla.BuscarNombre(alteracion.iden)
resultado = func.alterDropColumn(NombreDB,self.iden,ColumnaABorrar.numcol)
if resultado == 2:
e = errores.CError(0,0,"La base de datos " + NombreDB + " No existe",'Semantico')
errores.insert_error(e)
resultadotxt += "La base de datos " + NombreDB + " No existe \n"
elif resultado == 3:
e = errores.CError(0,0,"No se encontro la tabla " + self.iden + " en la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No se encontro la tabla " + self.iden + " en la base de datos " + NombreDB + "\n"
elif resultado == 4:
e = errores.CError(0,0,"La columna " + ColumnaABorrar.nombre + " Es llave primaria",'Semantico')
errores.insert_error(e)
resultadotxt += "La columna " + ColumnaABorrar.nombre + " Es llave primaria" + "\n"
elif resultado == 5:
e = errores.CError(0,0,"La columna " + ColumnaABorrar.nombre + " No existe",'Semantico')
errores.insert_error(e)
resultadotxt += "La columna " + ColumnaABorrar.nombre + " No existe" + "\n"
else:
tabla.simbolos.pop(ColumnaABorrar.id)
OrdenarColumnas(self.iden)
resultadotxt += "Se elimino la columna " + ColumnaABorrar.nombre + " de la tabla " + self.iden + "\n"
except:
"""ERROR SEMANTICO"""
except:
"""ERROR"""
print(resultadotxt)
return resultadotxt
def OrdenarColumnas(NombreTabla):
TablaActual = tabla.BuscarNombre(NombreTabla)
ListaColumnas = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.COLUMN:
ListaColumnas.append(tabla.simbolos[simbolo])
contador = 0
for columna in ListaColumnas:
columna.numcol = contador
contador+=1
tabla.actualizar(columna)
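# OrdenarColumnas re-numbers the remaining COLUMN symbols of a table (numcol = 0, 1, 2, ...)
# after a column has been dropped, so positional lookups into the stored tuples stay
# consistent; e.g. surviving columns with numcol [0, 2] become [0, 1].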
class alteracion1(instruccion):
def __init__(self,texto, iden):
self.texto = texto
self.iden = iden
class alteracion11(instruccion):
def __init__(self,texto, addprop):
self.texto = texto
self.addprop = addprop
class addprop(instruccion):
def __init__(self,texto, lista):
self.texto = texto
self.lista = lista
class alter(instruccion):
def __init__(self,iden, propaltcol):
self.iden = iden
self.propaltcol = propaltcol
class alteracion11111(instruccion):
def __init__(self,texto, iden, colkey):
self.iden = iden
self.texto = texto
self.colkey = colkey
# DATA MANIPULATION
#INSERT-------------------------------------
class insert(instruccion):
def __init__(self,iden, valores):
self.iden = iden
self.valores = valores
def traducir(self):
c3d = ''
traduccion = ''
traduccion += '\tsql.execute("INSERT INTO '+ self.iden + ' VALUES('
for v in self.valores:
if isinstance(v, llamadaF):
print(v)
c = v.traducir()
c3d += '\t'+str(c[0]).replace('\n','\n\t')
c3d += '\n'
traduccion += "\"+"+str(c[1])+ "+\","
else:
if isinstance(v , (int, float, complex)):
traduccion += str(v) + ","
elif isinstance(v, str):
traduccion += "'"+ v + "'" + ","
elif isinstance(v, bool):
traduccion += str(v) + ","
elif "ejecutar" in dir(v) :
traduccion += str(v.ejecutar()) + ","
traduccion = traduccion.replace(",)",")")
traduccion += ');")'
traduccion += '\n'
c3d += traduccion
return c3d.replace(',)',')')
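    # For example, with self.iden == "tabla1" and valores == [1, 'abc'] (hypothetical values)
    # the emitted translation is: \tsql.execute("INSERT INTO tabla1 VALUES(1,'abc');")\n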
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
global contregistro
resultadotxt = ""
try:
columnasdetabla = []
tablas = tabla.BuscarNombre(self.iden)
if not tablas:
e = errores.CError(0,0,"No existe la tabla " + self.iden,'Semantico')
errores.insert_error(e)
return "No existe la tabla " + self.iden + "\n"
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == tablas.id and not tabla.simbolos[simbolo].tipo == TS.TIPO.DATABASE and not tabla.simbolos[simbolo].tipo == TS.TIPO.TABLE and not tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
columnasdetabla.append(tabla.simbolos[simbolo])
colcorrecta = []
iter = 0
for columna in columnasdetabla:
if VerificarTipo(columna.tipocol, self.valores[iter]):
try:
if self.valores[iter].exp:
valcorrecto = self.valores[iter].ejecutar()
if isinstance(valcorrecto,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
else:
colcorrecta.append(str(valcorrecto))
except:
try:
if self.valores[iter].exp1:
valcorrecto = self.valores[iter].ejecutar()
if isinstance(valcorrecto,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
else:
colcorrecta.append(str(valcorrecto))
except:
colcorrecta.append(self.valores[iter])
else:
resultadotxt += "El tipo de valor no coincide con la columna"
iter+=1
resultado = func.insert(NombreDB,self.iden,colcorrecta)
if resultado == 2:
e = errores.CError(0,0,"No existe la base de datos " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base de datos " + NombreDB + "\n"
elif resultado == 3:
e = errores.CError(0,0,"No existe la base tabla " + NombreDB,'Semantico')
errores.insert_error(e)
resultadotxt += "No existe la base tabla " + NombreDB + "\n"
elif resultado == 5:
e = errores.CError(0,0,"La cantidad de valores no coincide con la cantidad de columnas",'Semantico')
errores.insert_error(e)
resultadotxt += "La cantidad de valores no coincide con la cantidad de columnas\n"
else:
nombrereg = "registro" + str(contregistro)
NuevoRegistro = TS.Simbolo(cont,nombrereg,TS.TIPO.TUPLA,tablas.id,0,"",0,"","",False,"",0,colcorrecta)
contregistro+=1
cont+=1
tabla.agregar(NuevoRegistro)
resultadotxt += "El registro " + nombrereg + " fue agregado a la tabla " + self.iden + "\n"
except:
"""ERRORES SEMANTICOS"""
print(resultadotxt)
return resultadotxt
# MATH FUNCTIONS
class funcionesmath():
'Abstract Class'
class math_abs2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return abs(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_cbrt2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.cbrt(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_ceil2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.ceil(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_degrees2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.degrees(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_div2(funcionesmath):
def __init__(self, exp1, exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = float(self.exp1)
num2 = float(self.exp2)
return mt.div(num1 , num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_exp2(funcionesmath):
def __init__(self,exp):
self.exp = exp
def ejecutar(self):
try:
num = int(self.exp)
return mt.exp(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_factorial2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = int(self.exp)
return mt.factorial(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_floor2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.floor(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_gcd2(funcionesmath):
def __init__(self, exp1, exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = int(self.exp1)
num2 = int(self.exp2)
return mt.gcd(num1,num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_lcm2(funcionesmath):
def __init__(self,exp1,exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = int(self.exp1)
num2 = int(self.exp2)
return mt.lcm(num1,num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_ln2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.ln(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_log2(funcionesmath):
def __init__(self, exp1, exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = int(self.exp1)
num2 = int(self.exp2)
return mt.log(num1,num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_log102(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
num = float(self.exp)
return mt.log10(num)
class math_min_scale2(funcionesmath):
def __init__(self,exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = int(self.exp)
return mt.min_scale(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_scale2(funcionesmath):
def __init__(self,exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
return mt.scale(str(self.exp))
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_mod2(funcionesmath):
def __init__(self, exp1,exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = float(self.exp1)
num2 = float(self.exp2)
return mt.mod(num1,num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_pi2(funcionesmath):
def __init__(self):
self.val = mt.pi()
def ejecutar(self):
try:
return self.val
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_power2(funcionesmath):
def __init__(self, exp1, exp2):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
num1 = int(self.exp1)
num2 = int(self.exp2)
return mt.power(num1,num2)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_radians2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.radians(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_round2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return round(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_sign2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.sign(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_sqrt2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.sqrt(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_trim_scale2(funcionesmath):
def __init__(self,exp):
self.exp = exp
def ejecutar(self):
try:
num = int(self.exp)
return mt.trim_scale(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_widthBucket2(funcionesmath):
def __init__(self, exp1, exp2, exp3, exp4):
self.exp1 = exp1
self.exp2 = exp2
self.exp3 = exp3
self.exp4 = exp4
def ejecutar(self):
        # pass the stored operands (the original hard-coded the placeholder values 9,8,7,6);
        # assumes the usual (operand, low, high, count) signature of width_bucket
        try:
            return mt.width_bucket(float(self.exp1), float(self.exp2), float(self.exp3), int(self.exp4))
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_trunc2(funcionesmath):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
        # not a dictionary
try:
num = float(self.exp)
return mt.trunc(num)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
class math_random2(funcionesmath):
def __init__(self):
"""VACIO"""
def ejecutar(self):
return mt.random()
class math_setseed2(funcionesmath):
def __init__(self,exp):
self.exp = exp
def ejecutar(self):
try:
mt.setseed(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion matematica",'Semantico')
errores.insert_error(e)
return e
# TRIGONOMETRIC FUNCTIONS
class funcionestrig():
'Abstract Class'
class trig_acos2(funcionestrig):
def __init__(self, exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.acos(float(temp))
return trim
class trig_acosd2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.acosd(float(temp))
return trim
class trig_asin2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.asin(float(temp))
return trim
class trig_asind2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.asind(float(temp))
return trim
class trig_atan2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atan(float(temp))
return trim
class trig_atand2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atand(float(temp))
return trim
class trig_atan22(funcionestrig):
def __init__(self, exp1, exp2 ):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
temp1 = float(self.exp1)
temp2 = float(self.exp2)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atan2(temp1,temp2)
return trim
class trig_atan2d2(funcionestrig):
def __init__(self, exp1, exp2 ):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
temp1 = float(self.exp1)
temp2 = float(self.exp2)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atan2d(temp1,temp2)
return trim
class trig_cos2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cos(float(temp))
return trim
class trig_cosd2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cosd(float(temp))
return trim
class trig_cot2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cot(float(temp))
return trim
class trig_cotd2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cotd(float(temp))
return trim
class trig_sin2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sin(float(temp))
return trim
class trig_sind2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sind(float(temp))
return trim
class trig_tan2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tan(float(temp))
return trim
class trig_tand2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tand(float(temp))
return trim
class trig_sinh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sinh(float(temp))
return trim
class trig_cosh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cosh(float(self.exp))
return trim
class trig_tanh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tanh(float(temp))
return trim
class trig_asinh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.asinh(float(temp))
return trim
class trig_acosh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.acosh(float(temp))
return trim
class trig_atanh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atanh(float(temp))
return trim
# GENERAL FUNCTIONS
class funciongen():
    'Abstract Class'
class fun_length2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
        # compute the length of the string representation and return it
temp = str(self.exp )
trim = len(temp)
return trim
class fun_trim2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
temp = str(self.exp)
trim = temp.strip()
return trim
class fun_md52(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
        # compute the MD5 hex digest of the string and return it
temp = str(self.exp )
crypt = hashlib.md5()
crypt.update(temp.encode('utf-8'))
r = crypt.hexdigest()
return r
class fun_sha2562(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
        # compute the SHA-256 hex digest of the string and return it
temp = str(self.exp)
crypt = hashlib.sha256()
crypt.update(temp.encode('utf-8'))
r = crypt.hexdigest()
return r
class fun_substr2(funciongen):
def __init__ (self,exp,min,max):
self.exp = exp
self.min = min
self.max = max
def ejecutar(self):
        # take the substring and return it
temp = str(self.exp)
sub = temp[self.min:self.max]
return sub
class fun_greatest2(funciongen):
def __init__ (self,lexps):
self.lexps = lexps
def ejecutar(self):
try:
maximo = float(self.lexps[0])
for dato in self.lexps:
temp = float(dato)
if maximo < temp:
maximo = temp
return maximo
except:
e = errores.CError(0,0,"Funcion least necesita una lista",'Semantico')
errores.insert_error(e)
return e
class fun_least2(funciongen):
def __init__ (self,lexps):
self.lexps = lexps
def ejecutar(self):
try:
maximo = float(self.lexps[0])
for dato in self.lexps:
temp = float(dato)
if maximo > temp:
maximo = temp
return maximo
except:
e = errores.CError(0,0,"Funcion least necesita una lista",'Semantico')
errores.insert_error(e)
return e
class dato2(funciongen):
def __init__ (self,val):
self.val = val
class fun_now2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
        # current date formatted as YYYY-MM-DD HH:MM:SS (the time part is 00:00:00, since date.today() carries no time)
today = date.today()
d1 = today.strftime("%Y-%m-%d %H:%M:%S")
return d1
def VerificarTipo(TipoColumna,ValorColumna):
"""try:
if float(ValorColumna):
TipoRegistro = definir_tipo(float(ValorColumna))
elif int(ValorColumna):
TipoRegistro = definir_tipo(int(ValorColumna))
except:
TipoRegistro = definir_tipo(ValorColumna)
if TipoRegistro == "smallint" and TipoColumna == "integer":
TipoRegistro = "integer"
try:
if TipoColumna.restipo.lower() == TipoRegistro:
return True
else:
return False
except:
if TipoColumna.lower() == TipoRegistro:
return True
else:
return False"""
return True
def definir_tipo(entrada):
"""if isinstance(entrada,int) or isinstance(entrada,float):
if entrada < 32767 and entrada > -32768:
return "smallint"
elif entrada < 214783648 and entrada > -214783648:
return "integer"
elif entrada < 9223372036854775808 and entrada > -9223372036854775808:
return "bigint"
elif entrada < 92233720368547758.08 and entrada > -92233720368547758.08 :
return "money"
else:
return "decimal"
elif isinstance(entrada,bool):
return "boolean"
else:
g = entrada.count('-')
dp = entrada.count(':')
if len(entrada) == 1:
return "char"
elif g == 3 and dp == 3:
return "time"
elif g == 3 and dp == 0:
return "date"
else:
return "varchar"""
#UPDATE-----------------------------------------
class update(instruccion):
def __init__(self,iden, cond, wherecond):
self.iden = iden
self.cond = cond
self.wherecond = wherecond
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("UPDATE'
traduccion += ' ' + self.iden
NombreColumna = self.cond.iden
traduccion += ' SET ' + NombreColumna
traduccion += ' = '
if isinstance(self.cond.tipo , (int, float, complex)):
traduccion += str(self.cond.tipo)
elif isinstance(self.cond.tipo , str):
traduccion += "'" + self.cond.tipo + "'"
elif isinstance(self.cond.tipo, bool):
traduccion += str(self.cond.tipo )
else:
try:
temp = self.cond.tipo.ejecutar()
if isinstance(temp, (int, float, complex)):
traduccion += str(temp)
elif isinstance(temp, str):
traduccion += temp
elif isinstance(temp, bool):
traduccion += str(temp)
except:
'''error'''
traduccion += ' WHERE '
tempwherw = self.wherecond
if isinstance(tempwherw,wherecond1):
traduccion += ' ' + tempwherw.iden
traduccion += ' ' + tempwherw.signo
if isinstance(tempwherw.tipo, str):
traduccion += " '" + tempwherw.tipo + "'"
elif isinstance(tempwherw.tipo, (int, float, complex)):
traduccion += ' ' + str(tempwherw.tipo)
if "ejecutar" in dir(self.wherecond.tipo):
traduccion += ' ' + str(self.wherecond.tipo.ejecutar())
if isinstance(tempwherw, wherecond):
traduccion += ' ' + tempwherw.iden + ' BETWEEN'
try:
traduccion += ' ' + str(tempwherw.tipo.ejecutar())
except:
traduccion += ' ' + tempwherw.tipo
traduccion += ' AND '
try:
traduccion += ' ' + str(tempwherw.tipo2.ejecutar()) + ' '
except:
traduccion += ' ' + str(tempwherw.tipo2) + ' '
traduccion += ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
try:
TuplasTabla = []
ColumnasTabla = []
            # GET THE TABLE'S TUPLES AND COLUMNS
TablaActual = tabla.BuscarNombre(self.iden)
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
TuplasTabla.append(tabla.simbolos[simbolo])
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.COLUMN:
ColumnasTabla.append(tabla.simbolos[simbolo])
            # GET THE CONDITION FIELD
#Condicion = self.wherecond.tipo
try:
if self.wherecond.tipo.exp:
Condicion = self.wherecond.tipo.ejecutar()
if isinstance(Condicion,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo.exp1:
Condicion = self.wherecond.tipo.ejecutar()
if isinstance(Condicion,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
Condicion = self.wherecond.tipo
NombreColumna = self.cond.iden
columnacond = self.wherecond.iden
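            # Two cases follow: the try-block handles WHERE col BETWEEN a AND b (the where
            # condition carries both tipo and tipo2), while the except-block falls back to a
            # plain equality match on the single condition value.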
try:
#cond2 = self.wherecond.tipo2
try:
if self.wherecond.tipo2.exp:
cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo2.exp1:
cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
cond2 = self.wherecond.tipo2
TuplasMod = []
for columna in ColumnasTabla:
if columna.nombre == NombreColumna:
ColumnaModificar = columna
break
for columna in ColumnasTabla:
if columna.nombre == columnacond:
ColumnaCondicion = columna
break
for tupla in TuplasTabla:
if Condicion <= tupla.registro[ColumnaCondicion.numcol] and tupla.registro[ColumnaCondicion.numcol] <= cond2:
TuplasMod.append(tupla)
for registro in TuplasMod:
registro.registro[ColumnaModificar.numcol] = self.cond.tipo
tabla.actualizar(registro)
func.update(NombreDB,self.iden,TuplasMod,ColumnasTabla)
resultadotxt += "Los registros fueron actualizados\n"
except:
for tupla in TuplasTabla:
for registro in tupla.registro:
if Condicion == registro:
TuplaModificar = tupla
break
for columna in ColumnasTabla:
if columna.nombre == NombreColumna:
ColumnaModificar = columna
break
TuplaModificar.registro[ColumnaModificar.numcol] = self.cond.tipo
tabla.actualizar(TuplaModificar)
                # UPDATE THE JSON FILE
TuplasTabla = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
TuplasTabla.append(tabla.simbolos[simbolo])
func.update(NombreDB,self.iden,TuplasTabla,ColumnasTabla)
resultadotxt += "Los registros fueron actualizados\n"
except:
"""ERROR"""
return resultadotxt
#DELETE-------------------------------------------
class delete(instruccion):
def __init__(self,iden, wherecond):
self.iden = iden
self.wherecond = wherecond
def traducir(self):
tempwherw = self.wherecond
traduccion = '\t'
traduccion += 'sql.execute("DELETE FROM ' + self.iden + ' WHERE '
if isinstance(tempwherw,wherecond1):
traduccion += ' ' + tempwherw.iden
traduccion += ' ' + tempwherw.signo
if isinstance(tempwherw.tipo, str):
traduccion += " '" + tempwherw.tipo + "'"
elif isinstance(tempwherw.tipo, (int, float, complex)):
traduccion += ' ' + str(tempwherw.tipo)
if "ejecutar" in dir(self.wherecond.tipo):
traduccion += ' ' + str(self.wherecond.tipo.ejecutar())
if isinstance(tempwherw, wherecond):
traduccion += ' ' + tempwherw.iden + ' BETWEEN'
try:
traduccion += ' ' + str(tempwherw.tipo.ejecutar())
except:
traduccion += ' ' + tempwherw.tipo
traduccion += ' AND '
try:
traduccion += ' ' + str(tempwherw.tipo2.ejecutar()) + ' '
except:
traduccion += ' ' + str(tempwherw.tipo2) + ' '
traduccion += ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
try:
TablaActual = tabla.BuscarNombre(self.iden)
if not TablaActual:
e = errores.CError(0,0,"No existe la tabla " + self.iden,'Semantico')
errores.insert_error(e)
return "No existe la tabla " + self.iden
TuplasTabla = []
ColumnasTabla = []
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
TuplasTabla.append(tabla.simbolos[simbolo])
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.COLUMN:
ColumnasTabla.append(tabla.simbolos[simbolo])
resultado = func.delete(NombreDB,self.iden,ColumnasTabla)
try:
#BETWEEN
#cond2 = self.wherecond.tipo2
try:
if self.wherecond.tipo2.exp:
cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo2.exp1:
                            cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
cond2 = self.wherecond.tipo2
#cond1 = self.wherecond.tipo
try:
if self.wherecond.tipo.exp:
cond1 = self.wherecond.tipo.ejecutar()
if isinstance(cond1,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo.exp1:
cond1 = self.wherecond.tipo.ejecutar()
if isinstance(cond1,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
cond1 = self.wherecond.tipo
campocond = self.wherecond.iden
for columna in ColumnasTabla:
if columna.nombre == campocond:
ColumnaMod = columna
break
ListaTuplaDelete = []
for tupla in TuplasTabla:
if tupla.registro[ColumnaMod.numcol] >= cond1 and tupla.registro[ColumnaMod.numcol] <= cond2:
ListaTuplaDelete.append(tupla)
for tupla in ListaTuplaDelete:
tabla.simbolos.pop(tupla.id)
resultadotxt += "Se eliminaron los registros de la tabla\n"
except:
#cond = self.wherecond.tipo
try:
if self.wherecond.tipo.exp:
cond = self.wherecond.tipo.ejecutar()
if isinstance(cond,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo.exp1:
cond = self.wherecond.tipo.ejecutar()
if isinstance(cond,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
cond = self.wherecond.tipo
campocond = self.wherecond.iden
for columna in ColumnasTabla:
if columna.nombre == campocond:
ColumnaMod = columna
break
ListaTuplaDelete = []
for tupla in TuplasTabla:
if tupla.registro[ColumnaMod.numcol] == cond:
ListaTuplaDelete.append(tupla)
for tupla in ListaTuplaDelete:
tabla.simbolos.pop(tupla.id)
resultadotxt += "Se eliminaron los registros de la tabla\n"
except:
"""ERROR"""
return resultadotxt
#--------------------------------------------CLASSES FOR INDEXES--------------------------------------------------
class IndexCreate(instruccion):
    'Main class for creating indexes'
def __init__(self,uniqueind, id1, id2, createind2):
self.uniqueind = uniqueind
self.id1 = id1
self.id2 = id2
self.createind2 = createind2
def traducir(self):
traduccion = '\t'
traduccion += 'sql.execute("CREATE UNIQUE INDEX ' + self.id1 + ' ON ' + self.id2 + '('
if isinstance(self.createind2, createind3):
temp = self.createind2.listacolind
for x in temp:
                # TODO: check whether this can also be a function call
if isinstance(x, str):
traduccion += ' '+ x + ','
if isinstance(x, llamadaF):
traduccion += x.id
traduccion = traduccion.replace(',)',')')
traduccion += ');")'
traduccion += '\n'
return traduccion.replace(',)',')')
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
global contambito
try:
NuevoIndice = TS.Simbolo(cont,self.id1,TS.TIPO.INDICE,contambito)
cont+=1
contambito+=1
NuevoIndice.uniqueind = self.uniqueind
NuevoIndice.tablaind = self.id2
if self.uniqueind != "":
NuevoIndice.tipoind = "UNIQUE INDEX"
else:
NuevoIndice.tipoind = "INDEX"
if isinstance(self.createind2, createind3):
"createind2 es createind3"
columnasdeindice = []
columnastexto = ""
for columna in self.createind2.listacolind:
if isinstance(columna, columnaind):
"ES UN OBJETO"
if isinstance(columna.propiedad, ordenind):
if NuevoIndice.ordenind == "":
NuevoIndice.ordenind = columna.propiedad.orden
columnasdeindice.append(columna.id)
else:
NuevoIndice.ordenind = "Ninguno"
columnasdeindice.append(columna.id)
else:
NuevoIndice.ordenind = "Ninguno"
columnasdeindice.append(columna.propiedad)
else:
if NuevoIndice.ordenind == "":
NuevoIndice.ordenind = "Ninguno"
columnasdeindice.append(columna)
for elemento in columnasdeindice:
columnastexto += elemento + " "
NuevoIndice.columnaind = columnastexto
NuevoIndice.listacolind = columnasdeindice
tabla.agregar(NuevoIndice)
return "Se agrego el indice " + self.id1 + " a la tabla de simbolos"
except:
return "Error al crear indice"
class createind3(instruccion):
def __init__(self,listacolind, indwhere):
self.listacolind = listacolind
self.indhwere = indwhere
class columnaind(instruccion):
def __init__(self,id, propiedad):
self.id = id
self.propiedad = propiedad
class ordenind(instruccion):
def __init__(self,orden):
self.orden = orden
class indwhere(instruccion):
def __init__(self,indnot, indwherecond):
self.indnot = indnot
self.indwherecond = indwherecond
class notval(instruccion):
def __init__(self, id1, signo, id2, valortipo):
self.id1 = id1
self.signo = signo
self.id2 = id2
self.valortipo = valortipo
class indwherecond(instruccion):
def __init__(self, id, signo, valortipo):
self.id = id
self.signo = signo
self.valortipo = valortipo
#----------------------------------------------------------------------------------------------------------------------
#--------------------------------------------CLASSES FOR DROP INDEX--------------------------------------------------
class IndexDrop(instruccion):
def __init__(self, tipo, listaindices, orden):
self.tipo = tipo
self.listaindices = listaindices
self.orden = orden
def traducir(self):
traduccion = ''
for x in self.listaindices:
traduccion += '\tsql.execute("DROP INDEX ' + x + ';")'+ '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
global contambito
textores = ""
try:
eliminar = []
for indice in self.listaindices:
if tabla.BuscarNombre(indice):
eliminarindice = tabla.BuscarNombre(indice)
eliminar.append(eliminarindice)
else:
textores += "No se encontro el indice " + indice + "\n"
for simbolo in eliminar:
if simbolo.tipo == TS.TIPO.INDICE:
tabla.simbolos.pop(simbolo.id)
textores += "Se elimino el indice " + simbolo.nombre + " de la tabla de simbolos\n"
return textores
except:
return "Error en " + self.tipo
#--------------------------------------------CLASSES FOR ALTER INDEX-------------------------------------------------
class IndexAlter(instruccion):
def __init__(self, tipo, alterind2):
self.tipo = tipo
self.alterind2 = alterind2
def traducir(self):
return ''
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
global contambito
try:
if self.alterind2.tipocambio.lower() == "alter" or self.alterind2.tipocambio.lower() == "alter column":
if isinstance(self.alterind2.listacol, alterind):
if tabla.BuscarNombre(self.alterind2.id):
Indice = tabla.BuscarNombre(self.alterind2.id)
iter = 0
for col in Indice.listacolind:
if col == self.alterind2.listacol.buscarid:
Indice.listacolind[iter] = self.alterind2.listacol.nuevoid
break
iter+=1
columnastexto = ""
for elemento in Indice.listacolind:
columnastexto += elemento + " "
Indice.columnaind = columnastexto
tabla.actualizar(Indice)
if Indice.listacolind[iter] == self.alterind2.listacol.buscarid:
return "No existe la columna " + self.alterind2.listacol.buscarid + " en el indice " + self.alterind2.id
return "Se cambio la columna " + self.alterind2.listacol.buscarid + " por " + self.alterind2.listacol.nuevoid + " del indice " + self.alterind2.id
else:
return "No existe el indice" + self.alterind2.id
else:
NuevoAlterIndex = TS.Simbolo(cont,self.alterind2.id,TS.TIPO.INDICE,contambito)
cont+=1
contambito+=1
NuevoAlterIndex.tipoind = self.tipo
NuevoAlterIndex.indicesind = self.alterind2.id
NuevoAlterIndex.ordenind = self.alterind2.tipocambio
NuevoAlterIndex.tablaind = "Ninguno"
coltexto = ""
for col in self.alterind2.listacol:
coltexto += col + " "
NuevoAlterIndex.columnaind = coltexto
tabla.agregar(NuevoAlterIndex)
return "Se agrego el " + self.tipo + " a la tabla de simbolos"
except:
return "Error en " + self.tipo
class propalter(instruccion):
def __init__(self, tipocambio, id, listacol):
self.tipocambio = tipocambio
self.id = id
self.listacol = listacol
class alterind(instruccion):
def __init__(self,buscarid,nuevoid):
self.buscarid = buscarid
self.nuevoid = nuevoid
```
{
"source": "jmanson377/MVMOO",
"score": 3
} |
#### File: MVMOO/MVMOO/multi_mixed_optimiser.py
```python
import numpy as np
from scipy.stats import norm
from .mixed_optimiser import MVO
from scipy.optimize import shgo, differential_evolution, dual_annealing
import scipy as stats  # note: an odd alias for scipy; stats.optimize works below only because scipy.optimize was imported above
class MVMOO(MVO):
"""
Multi variate mixed variable optimisation
"""
def __init__(self, input_dim=1, num_qual=0, num_obj=2, bounds=None, k_type='matern3', dist='manhattan', scale='bounds'):
"""
Initialisation of the class
"""
super().__init__(input_dim=input_dim, num_qual=num_qual, bounds=bounds, dist=dist, k_type=k_type)
self.num_obj = num_obj
self.scale = scale
def generatemodels(self, X, Y, scale=True, variance=1.0):
"""
Generate a list containing the models for each of the objectives
"""
self.nsamples, nobj = np.shape(Y)
models = []
if scale is True:
self.Yscaled = self.scaley(Y)
self.Xscaled = self.scaleX(X,mode=self.scale)
for i in range(nobj):
self.fitmodel(self.Xscaled, self.Yscaled[:,i].reshape((-1,1)), variance=variance)
models.append(self.model)
return models
for i in range(nobj):
self.fitmodel(X, Y[:,i].reshape((-1,1)))
models.append(self.model)
return models
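    # Note: one independent GP surrogate is fitted per objective column; with scale=True the
    # models are trained on the scaled inputs/outputs (self.Xscaled, self.Yscaled) built above.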
def is_pareto_efficient(self, costs, return_mask = True):
"""
Find the pareto-efficient points for minimisation problem
:param costs: An (n_points, n_costs) array
:param return_mask: True to return a mask
:return: An array of indices of pareto-efficient points.
If return_mask is True, this will be an (n_points, ) boolean array
Otherwise it will be a (n_efficient_points, ) integer array of indices.
"""
is_efficient = np.arange(costs.shape[0])
n_points = costs.shape[0]
next_point_index = 0 # Next index in the is_efficient array to search for
while next_point_index<len(costs):
nondominated_point_mask = np.any(costs<costs[next_point_index], axis=1)
nondominated_point_mask[next_point_index] = True
is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points
costs = costs[nondominated_point_mask]
next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1
if return_mask:
is_efficient_mask = np.zeros(n_points, dtype = bool)
is_efficient_mask[is_efficient] = True
return is_efficient_mask
else:
return is_efficient
def paretofront(self, Y):
"""
        Return an array of the Pareto front for the system (set up for minimisation)
"""
ind = self.is_pareto_efficient(Y, return_mask=False)
return Y[ind,:]
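    # Worked example (hypothetical costs, minimisation): for costs = [[1, 4], [2, 2], [3, 1], [3, 3]]
    # the point [3, 3] is dominated by [2, 2], so is_pareto_efficient returns the mask
    # [True, True, True, False] and paretofront keeps only the first three rows.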
def EIM(self, X, mode='euclidean'):
"""
        Calculate the expected improvement matrix for a candidate point
@ARTICLE{7908974,
author={<NAME>} and <NAME>} and J. {Liu}},
journal={IEEE Transactions on Evolutionary Computation},
title={Expected Improvement Matrix-Based Infill Criteria for Expensive Multiobjective Optimization},
year={2017},
volume={21},
number={6},
pages={956-975},
doi={10.1109/TEVC.2017.2697503},
ISSN={1089-778X},
month={Dec}}
"""
f = self.currentfront
nfx = np.shape(f)[0]
nobj = np.shape(f)[1]
nx = np.shape(X)[0]
r = 1.1 * np.ones((1, nobj))
y = np.zeros((nx, 1))
ulist = []
varlist = []
X = self.scaleX(X, mode='bounds')
for iobj in range(nobj):
u, var = self.models[iobj].predict_y(X)
ulist.append(u)
varlist.append(var)
u = np.concatenate(ulist, axis=1)
var = np.concatenate(varlist, axis=1)
std = np.sqrt(np.maximum(0,var))
u_matrix = np.reshape(u.T,(1,nobj,nx)) * np.ones((nfx,1,1))
s_matrix = np.reshape(std.T,(1,nobj,nx)) * np.ones((nfx,1,1))
f_matrix = f.reshape((nfx,nobj,1)) * np.ones((1,1,nx))
Z_matrix = (f_matrix - u_matrix) / s_matrix
EI_matrix = np.multiply((f_matrix - u_matrix), norm.cdf(Z_matrix)) + np.multiply(s_matrix, norm.pdf(Z_matrix))
if mode == 'euclidean':
y = np.min(np.sqrt(np.sum(EI_matrix**2,axis=1)),axis=0).reshape(-1,1)
elif mode == 'hypervolume':
            y = np.min(np.prod(r.reshape(1,nobj,1) - f_matrix + EI_matrix, axis=1) - np.prod(r - f, axis=1).reshape((-1,1)),axis=0).reshape((-1,1))
elif mode == 'maxmin':
y = np.min(np.max(EI_matrix,axis=1),axis=0).reshape(-1,1)
elif mode == 'combine':
y = np.min(np.sqrt(np.sum(EI_matrix**2,axis=1)),axis=0).reshape(-1,1) +\
                np.min(np.prod(r.reshape(1,nobj,1) - f_matrix + EI_matrix, axis=1) - \
np.prod(r - f, axis=1).reshape((-1,1)),axis=0).reshape((-1,1))
else:
y1 = np.min(np.sqrt(np.sum(EI_matrix**2,axis=1)),axis=0).reshape(-1,1)
            y2 = np.min(np.prod(r.reshape(1,nobj,1) - f_matrix + EI_matrix, axis=1) - np.prod(r - f, axis=1).reshape((-1,1)),axis=0).reshape((-1,1))
#y3 = np.min(np.max(EI_matrix,axis=1),axis=0).reshape(-1,1)
return np.hstack((y1,y2))
return y
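    # The EI matrix holds, for every Pareto point i and candidate x, the per-objective
    # improvement (f_ij - u_j)*Phi(Z) + s_j*phi(Z). The modes above only differ in how that
    # matrix is collapsed to one acquisition value per candidate: 'euclidean' takes the minimum
    # (over the front) Euclidean norm, 'hypervolume' the minimum hypervolume-based gain w.r.t.
    # the reference point r, 'maxmin' the minimum of the per-point maxima, and 'combine'/'all'
    # respectively sum and return both of the first two criteria.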
def CEIM_Hypervolume(self, X):
"""
        Calculate the expected improvement matrix for a candidate point, given constraints
@ARTICLE{7908974,
author={<NAME>} and <NAME>} and <NAME>}},
journal={IEEE Transactions on Evolutionary Computation},
title={Expected Improvement Matrix-Based Infill Criteria for Expensive Multiobjective Optimization},
year={2017},
volume={21},
number={6},
pages={956-975},
doi={10.1109/TEVC.2017.2697503},
ISSN={1089-778X},
month={Dec}}
"""
f = self.currentfront
nobj = np.shape(f)[1]
nx = np.shape(X)[0]
r = 1.1 * np.ones((1, nobj))
y = np.zeros((nx, 1))
ulist = []
varlist = []
for iobj in range(nobj):
u, var = self.models[iobj].predict_y(X)
ulist.append(u)
varlist.append(var)
u = np.concatenate(ulist, axis=1)
var = np.concatenate(varlist, axis=1)
std = np.sqrt(np.maximum(0,var))
for ix in range(nx):
Z = (f - u[ix,:]) / std[ix,:]
EIM = np.multiply((f - u[ix,:]), norm.cdf(Z)) + np.multiply(std[ix,:], norm.pdf(Z))
y[ix] = np.min(np.prod(r - f + EIM, axis=1) - np.prod(r - f, axis=1))
# Constraints
ncon = len(self.constrainedmodels)
uconlist = []
varconlist = []
for iobj in range(ncon):
ucon, varcon = self.constrainedmodels[iobj].predict_y(X)
uconlist.append(ucon)
varconlist.append(varcon)
ucon = np.concatenate(uconlist, axis=1)
varcon = np.concatenate(varconlist, axis=1)
stdcon = np.sqrt(np.maximum(0,varcon))
PoF = np.prod(norm.cdf((0 - ucon) / stdcon), axis=1).reshape(-1,1)
return y * PoF
def AEIM_Hypervolume(self, X):
"""
        Calculate the adaptive expected improvement matrix for a candidate point
Adaptive addition based on https://arxiv.org/pdf/1807.01279.pdf
"""
f = self.currentfront
c = self.contextual
nfx = np.shape(f)[0]
nobj = np.shape(f)[1]
nx = np.shape(X)[0]
r = 1.1 * np.ones((1, nobj))
y = np.zeros((nx, 1))
ulist = []
varlist = []
for iobj in range(nobj):
u, var = self.models[iobj].predict_y(X)
ulist.append(u)
varlist.append(var)
u = np.concatenate(ulist, axis=1)
var = np.concatenate(varlist, axis=1)
std = np.sqrt(np.maximum(0,var))
u_matrix = np.reshape(u.T,(1,nobj,nx)) * np.ones((nfx,1,1))
s_matrix = np.reshape(std.T,(1,nobj,nx)) * np.ones((nfx,1,1))
f_matrix = f.reshape((nfx,nobj,1)) * np.ones((1,1,nx))
c_matrix = c.reshape((nfx,nobj,1)) * np.ones((1,1,nx))
Z_matrix = (f_matrix - u_matrix - c_matrix) / s_matrix
EI_matrix = np.multiply((f_matrix - u_matrix), norm.cdf(Z_matrix)) + np.multiply(s_matrix, norm.pdf(Z_matrix))
        y = np.min(np.prod(r.reshape(1,nobj,1) - f_matrix + EI_matrix, axis=1) - np.prod(r - f, axis=1).reshape((-1,1)),axis=0).reshape((-1,1))
#for ix in range(nx):
# Z = (f - u[ix,:] - c) / std[ix,:]
# EIM = np.multiply((f - u[ix,:]), norm.cdf(Z)) + np.multiply(std[ix,:], norm.pdf(Z))
# y[ix] = np.min(np.prod(r - f + EIM, axis=1) - np.prod(r - f, axis=1))
return y
def AEIM_Euclidean(self, X):
"""
        Calculate the expected improvement matrix for a candidate point
@ARTICLE{7908974,
author={<NAME>} and <NAME>} and J. {Liu}},
journal={IEEE Transactions on Evolutionary Computation},
title={Expected Improvement Matrix-Based Infill Criteria for Expensive Multiobjective Optimization},
year={2017},
volume={21},
number={6},
pages={956-975},
doi={10.1109/TEVC.2017.2697503},
ISSN={1089-778X},
month={Dec}}
"""
f = self.currentfront
c = self.contextual
nfx = np.shape(f)[0]
nobj = np.shape(f)[1]
nx = np.shape(X)[0]
y = np.zeros((nx, 1))
ulist = []
varlist = []
X = self.scaleX(X, mode='bounds')
for iobj in range(nobj):
u, var = self.models[iobj].predict_f(X)
ulist.append(u)
varlist.append(var)
u = np.concatenate(ulist, axis=1)
var = np.concatenate(varlist, axis=1)
std = np.sqrt(np.maximum(0,var))
u_matrix = np.reshape(u.T,(1,nobj,nx)) * np.ones((nfx,1,1))
s_matrix = np.reshape(std.T,(1,nobj,nx)) * np.ones((nfx,1,1))
f_matrix = f.reshape((nfx,nobj,1)) * np.ones((1,1,nx))
c_matrix = c.reshape((nfx,nobj,1)) * np.ones((1,1,nx))
Z_matrix = (f_matrix - u_matrix - c_matrix) / s_matrix
EI_matrix = np.multiply((f_matrix - u_matrix), norm.cdf(Z_matrix)) + np.multiply(s_matrix, norm.pdf(Z_matrix))
y = np.min(np.sqrt(np.sum(EI_matrix**2,axis=1)),axis=0).reshape(-1,1)
return y
def EIMoptimiserWrapper(self, Xcont, Xqual, constraints=False, mode='euclidean'):
X = np.concatenate((Xcont.reshape((1,-1)), Xqual.reshape((1,-1))), axis=1)
if constraints is not False:
return -self.CEIM_Hypervolume(X)
return -self.EIM(X,mode).reshape(-1)
def AEIMoptimiserWrapper(self, Xcont, Xqual, constraints=False):
X = np.concatenate((Xcont.reshape((1,-1)), Xqual.reshape((1,-1))), axis=1)
return -self.AEIM_Euclidean(X).reshape(-1)
def EIMmixedoptimiser(self, constraints, algorithm='Random Local', values=None, mode='euclidean'):
"""
        Optimise the EIM acquisition over the whole (mixed) search domain
"""
if algorithm == 'Random':
Xsamples = self.sample_design(samples=10000, design='halton')
if constraints is False:
fvals = self.EIM(Xsamples, mode=mode)
else:
fvals = self.CEIM_Hypervolume(Xsamples)
fmax = np.amax(fvals)
indymax = np.argmax(fvals)
xmax = Xsamples[indymax,:]
if values is None:
return fmax, xmax
return fmax, xmax, fvals, Xsamples
elif algorithm == 'Random Local':
Xsamples = self.sample_design(samples=10000, design='halton')
if constraints is False:
fvals = self.EIM(Xsamples, mode=mode)
else:
fvals = self.CEIM_Hypervolume(Xsamples)
if mode == 'all':
fmax = np.max(fvals,axis=0)
print(fvals.shape)
print(fmax.shape)
indmax = np.argmax(fvals,axis=0)
print(indmax)
xmax = Xsamples[indmax,:]
qual = xmax[:,-self.num_qual:].reshape(-1)
bnd = list(self.bounds[:,:self.num_quant].T)
bndlist = []
for element in bnd:
bndlist.append(tuple(element))
modes = ['euclidean', 'hypervolume']
results = []
for i in range(2):
results.append(stats.optimize.minimize(self.EIMoptimiserWrapper, xmax[i,:-self.num_qual].reshape(-1), args=(qual[i],constraints,modes[i]), bounds=bndlist,method='SLSQP'))
xmax = np.concatenate((results[0].x, qual[0]),axis=None)
xmax = np.vstack((xmax,np.concatenate((results[1].x, qual[1]),axis=None)))
fmax = np.array((results[0].fun,results[1].fun))
return fmax, xmax
fmax = np.amax(fvals)
indymax = np.argmax(fvals)
xmax = Xsamples[indymax,:]
qual = xmax[-self.num_qual:]
bnd = list(self.bounds[:,:self.num_quant].T)
bndlist = []
for element in bnd:
bndlist.append(tuple(element))
result = stats.optimize.minimize(self.EIMoptimiserWrapper, xmax[:-self.num_qual].reshape(-1), args=(qual,constraints,mode), bounds=bndlist,method='SLSQP')
if values is None:
return result.fun, np.concatenate((result.x, qual),axis=None)
return fmax, xmax, fvals, Xsamples
else:
raise NotImplementedError()
def AEIMmixedoptimiser(self, constraints, algorithm='Random', values=None):
# Get estimate for mean variance of model using halton sampling
X = self.sample_design(samples=10000, design='halton')
X = self.scaleX(X, mode='bounds')
varlist = []
for iobj in range(self.num_obj):
_ , var = self.models[iobj].predict_y(X)
varlist.append(var)
var = np.concatenate(varlist, axis=1)
meanvar = np.mean(var,axis=0)
f = self.currentfront
self.contextual = np.divide(meanvar, f)
# Optimise acquisition
if algorithm == 'Random':
Xsamples = self.sample_design(samples=10000, design='halton')
fvals = self.AEIM_Hypervolume(Xsamples)
fmax = np.amax(fvals)
indymax = np.argmax(fvals)
xmax = Xsamples[indymax,:]
if values is None:
return fmax, xmax
return fmax, xmax, fvals, Xsamples
elif algorithm == 'Random Local':
Xsamples = self.sample_design(samples=10000, design='halton')
if constraints is False:
fvals = self.AEIM_Euclidean(Xsamples)
else:
raise NotImplementedError()
fmax = np.amax(fvals)
indymax = np.argmax(fvals)
xmax = Xsamples[indymax,:]
qual = xmax[-self.num_qual:]
bnd = list(self.bounds[:,:self.num_quant].T)
bndlist = []
for element in bnd:
bndlist.append(tuple(element))
result = stats.optimize.minimize(self.AEIMoptimiserWrapper, xmax[:-self.num_qual].reshape(-1), args=(qual,constraints), bounds=bndlist,method='SLSQP')
if values is None:
return result.fun, np.concatenate((result.x, qual),axis=None)
return fmax, xmax, fvals, Xsamples
elif algorithm == 'SHGO':
if self.num_qual < 1:
bnd = list(self.bounds.T)
bndlist = []
for element in bnd:
bndlist.append(tuple(element))
result = shgo(self.AEIM_Hypervolume,bndlist, sampling_method='sobol', n=30, iters=2)
return result.x, result.fun
else:
sample = self.sample_design(samples=1, design='random')
contbnd = list(self.bounds[:,:self.num_quant].T)
contbndlist = []
qual = sample[:,-self.num_qual:]
for element in contbnd:
contbndlist.append(tuple(element))
resXstore = []
resFstore = []
for i in range(np.shape(qual)[0]):
                    result = shgo(self.AEIMoptimiserWrapper, contbndlist, args=(qual[i,:],), sampling_method='sobol', n=30, iters=2)
resXstore.append(result.x)
resFstore.append(result.fun)
# sort for each discrete combination and get best point
ind = resFstore.index(min(resFstore))
xmax = np.concatenate((resXstore[ind],qual[ind,:]))
fval = min(resFstore)
return fval, xmax
elif algorithm == 'DE':
if self.num_qual < 1:
bnd = list(self.bounds.T)
bndlist = []
for element in bnd:
bndlist.append(tuple(element))
result = differential_evolution(self.AEIM_Hypervolume,bndlist)
return result.x, result.fun
else:
sample = self.sample_design(samples=1, design='random')
contbnd = list(self.bounds[:,:self.num_quant].T)
contbndlist = []
qual = sample[:,-self.num_qual:]
for element in contbnd:
contbndlist.append(tuple(element))
resXstore = []
resFstore = []
for i in range(np.shape(qual)[0]):
                    result = dual_annealing(self.AEIMoptimiserWrapper, contbndlist, args=(qual[i,:],))
resXstore.append(result.x)
resFstore.append(result.fun)
# sort for each discrete combination and get best point
ind = resFstore.index(min(resFstore))
xmax = np.concatenate((resXstore[ind],qual[ind,:]))
fval = min(resFstore)
return fval, xmax
return
def multinextcondition(self, X, Y, constraints=False, values=None, method='EIM', mode='euclidean'):
"""
Suggest the next condition for evaluation
"""
if constraints is False:
try:
self.k_type = 'matern3'
self.models = self.generatemodels(X, Y)
except:
print('Initial model optimisation failed, retrying with new kernel')
try:
self.k_type = 'matern5'
self.models = self.generatemodels(X, Y)
except:
print('Model optimisation failed, retrying with new value of variance')
for variance in [0.1,1,2,10]:
try:
self.models = self.generatemodels(X, Y, variance=variance)
except:
print('Model optimisation failed, retrying with new value of variance')
self.currentfront = self.paretofront(self.Yscaled)
means = []
for model in self.models:
mean, _ = model.predict_y(self.sample_design(samples=2, design='halton'))
means.append(mean.numpy())
            if np.any(np.isnan(means)):
print("Retraining model with new starting variance")
self.models = self.generatemodels(X, Y, variance=0.1)
if method == 'AEIM':
fmax, xmax = self.AEIMmixedoptimiser(constraints, algorithm='Random Local')
else:
fmax, xmax = self.EIMmixedoptimiser(constraints, algorithm='Random Local',mode=mode)
if values is None and mode != 'all':
return xmax.reshape(1,-1), fmax
elif values is None and mode == 'all':
if np.allclose(xmax[0,:],xmax[1,:], rtol=1e-3, atol=1e-5):
return xmax[0,:].reshape(1,-1), fmax[0]
return np.unique(xmax.round(6),axis=0), fmax
self.models = self.generatemodels(X,Y)
self.currentfront = self.paretofront(self.Yscaled)
self.constrainedmodels = self.generatemodels(X, constraints, scale=False)
fmax, xmax = self.EIMmixedoptimiser(constraints, algorithm='Simplical')
if values is None:
return xmax.reshape(1,-1), fmax
```
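
The array reshaping inside `MVMOO.EIM` (expanding the Pareto front and the GP predictions into `(n_front, n_obj, n_candidates)` tensors) is easier to follow in isolation. The standalone sketch below reproduces the same broadcasting with made-up numbers for the front, the predicted means and the predicted standard deviations; it is an illustration only and does not depend on the MVMOO class.

```python
import numpy as np
from scipy.stats import norm

# Hypothetical Pareto front (n_front=2 points, n_obj=2) and GP predictions for 3 candidates.
f = np.array([[0.2, 0.8],
              [0.5, 0.4]])
u = np.array([[0.3, 0.6],
              [0.1, 0.9],
              [0.4, 0.5]])
s = np.full_like(u, 0.1)

nfx, nobj = f.shape
nx = u.shape[0]

# Same broadcasting as MVMOO.EIM: axis 0 runs over front points, axis 1 over objectives,
# axis 2 over candidate points.
u_m = np.reshape(u.T, (1, nobj, nx)) * np.ones((nfx, 1, 1))
s_m = np.reshape(s.T, (1, nobj, nx)) * np.ones((nfx, 1, 1))
f_m = f.reshape((nfx, nobj, 1)) * np.ones((1, 1, nx))

Z = (f_m - u_m) / s_m
EI = (f_m - u_m) * norm.cdf(Z) + s_m * norm.pdf(Z)

# 'euclidean' infill criterion: smallest (over the front) norm of the per-objective EIs.
eim = np.min(np.sqrt(np.sum(EI ** 2, axis=1)), axis=0)
print(eim.shape)  # (3,) -- one acquisition value per candidate point
```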
{
"source": "jmanson377/PySMSIM",
"score": 4
} |
#### File: PySMSIM/examples/matyas.py
```python
import numpy as np
def matyas(X):
return (0.26 * (X[:,0] ** 2 + X[:,1] ** 2) - 0.48 * X[:,0] * X[:,1]).reshape(-1)
if __name__ == "__main__":
print(matyas(np.array([1,1]).reshape(1,-1)))
print(matyas(np.array([0,0]).reshape(1,-1)))
print(matyas(np.array([[0,0],[1,1]]).reshape(-1,2)))
print(matyas(np.array([0,0]).reshape(1,-1)).shape)
print(matyas(np.array([[0,0],[1,1]]).reshape(-1,2)).shape)
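    # The Matyas function f(x, y) = 0.26*(x**2 + y**2) - 0.48*x*y has its global minimum
    # f = 0 at the origin; a quick numerical sanity check of that property:
    assert np.isclose(matyas(np.array([[0.0, 0.0]])), 0.0).all()
    assert (matyas(np.array([[1.0, 1.0]])) > 0.0).all()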
```
{
"source": "jmansour/underworld2",
"score": 2
} |
#### File: development/docs_generator/generate_api_documentation.py
```python
ignorelist = ['underworld', 'glucifer', 'json', 'os', 'libUnderworld', 'glob', 'numpy', 'sys', 'os', 'time', 'control', 'LavaVuPython', 'lavavu', 're']
import os
builddir="build"
imagedir=os.path.join(builddir,"images")
try:
os.mkdir(builddir)
except FileExistsError:
pass
try:
os.mkdir(imagedir)
except FileExistsError:
pass
# copy images in
import shutil
import os
import glob
for filename in glob.glob(os.path.join("../../../underworld/function/images/", '*.png')):
shutil.copy(filename, imagedir)
done_mods = set()
def doc_module(module, modname):
filename = modname+'.rst'
filepath = os.path.join(builddir,filename)
print("Generating {} for module {}".format(filepath,modname))
import inspect
# first gather info
modules = {}
classes = {}
functions = {}
for guy in dir(module):
if guy[0] != "_": # don't grab private guys
if guy not in ignorelist: # don't grab these
obj = getattr(module,guy)
if inspect.ismodule(obj):
if obj.__file__ not in done_mods:
done_mods.add(obj.__file__)
modules[guy] = obj
# else:
# return
elif inspect.isclass(obj):
classes[guy] = obj
elif inspect.isfunction(obj):
functions[guy] = obj
# everything alphabetically
from collections import OrderedDict
modules = OrderedDict(sorted(modules.items()))
classes = OrderedDict(sorted(classes.items()))
functions = OrderedDict(sorted(functions.items()))
# create a new file for each module
with open(filepath, 'w') as f:
# write title
title = modname + " module\n"
f.write(title)
f.write("="*(len(title)-1)+"\n")
# write out the modules docstring
if module.__doc__:
f.write(module.__doc__+"\n")
f.write("\n")
f.write("Module Summary\n")
f.write("--------------\n")
# write submodules brief
if len(modules)>0:
f.write("submodules:\n")
f.write("~~~~~~~~~~~\n")
f.write("\n")
f.write(".. toctree::\n")
f.write(" :maxdepth: 1\n")
# f.write(".. autosummary::\n")
# f.write(" :nosignatures:\n")
# f.write(" :toctree:\n")
f.write("\n")
for key in modules.keys():
f.write(" "+ modname + "." + key+"\n")
f.write("\n")
# write functions brief
if len(functions)>0:
f.write("functions:\n")
f.write("~~~~~~~~~~\n")
f.write("\n")
f.write(".. autosummary::\n")
f.write(" :nosignatures:\n")
f.write("\n")
for key in functions.keys():
f.write(" "+ modname + "." + key+"\n")
f.write("\n")
# write classes brief
if len(classes)>0:
f.write("classes:\n")
f.write("~~~~~~~~\n")
f.write("\n")
f.write(".. autosummary::\n")
f.write(" :nosignatures:\n")
f.write("\n")
for key in classes.keys():
f.write(" "+ modname + "." + key+"\n")
f.write("\n")
f.write("Module Details\n")
f.write("--------------\n")
# write functions
if len(functions)>0:
f.write("functions:\n")
f.write("~~~~~~~~~~\n")
for key in functions.keys():
funcguy = getattr(module, key)
f.write(".. autofunction:: "+ modname + "." + key+"\n")
f.write("\n")
# write classes
if len(classes)>0:
f.write("classes:\n")
f.write("~~~~~~~~\n")
for key in classes.keys():
classguy = getattr(module, key)
f.write(".. autoclass:: "+ modname + "." + key+"\n")
f.write(" :members:\n")
f.write(" :show-inheritance:\n")
f.write("\n")
# recurse
for key in modules.keys():
doc_module(getattr(module,key), modname+"."+key)
import underworld
doc_module(underworld, 'underworld')
import glucifer
doc_module(glucifer, 'glucifer')
```
#### File: docs/test/collective_fail.py
```python
def fail_collective():
import underworld as uw
raise RuntimeError("Collective error.")
collective_msg = b'An uncaught exception appears to have been raised by all processes. Set the \'UW_ALL_MESSAGES\' environment variable to see all messages. Rank 0 message is:\nTraceback (most recent call last):\n File "collective_fail.py", line 18, in <module>\n fail_collective()\n File "collective_fail.py", line 4, in fail_collective\n raise RuntimeError("Collective error.")\nRuntimeError: Collective error.\n'
def fail_single():
import underworld as uw
if uw.mpi.rank==1:
raise RuntimeError("Rank 1 error.")
single_msg = b'An uncaught exception was encountered on processor 1.\nTraceback (most recent call last):\n File "collective_fail.py", line 20, in <module>\n fail_single()\n File "collective_fail.py", line 10, in fail_single\n raise RuntimeError("Rank 1 error.")\nRuntimeError: Rank 1 error.\n'
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
if sys.argv[1] == "collective":
fail_collective()
elif sys.argv[1] == "single":
fail_single()
else:
import subprocess
command = "mpirun -np 2 python collective_fail.py"
result = subprocess.run(command + " collective", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if not result.stderr.startswith(collective_msg):
raise RuntimeError("Incorrect collective error message encountered. \n"
"Expected:\n{}\n\n"
"Encountered:\n{}\n\n".format(collective_msg,result.stderr[0:len(collective_msg)+1]))
result = subprocess.run(command + " single", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if not result.stderr.startswith(single_msg):
raise RuntimeError("Incorrect collective error message encountered. \n"
"Expected:\n{}\n\n"
"Encountered:\n{}\n\n".format(single_msg,result.stderr[0:len(single_msg)+1]))
```
#### File: docs/test/mesh_aux.py
```python
import underworld as uw
import numpy as np
def meshtest(res, partitioned):
mesh = uw.mesh.FeMesh_Cartesian(elementRes=(res,res),partitioned=partitioned)
resp1 = res + 1
if not partitioned:
if (len(mesh.data) != (resp1*resp1)):
raise RuntimeError("A non-partitioned mesh should report identical vertex count "\
"independent of processor count.")
# test save/load of mesh
with mesh.deform_mesh():
mesh.data[:] *= 2.
cpy = mesh.data.copy()
mesh.save('temp.h5')
mesh.reset()
if np.allclose(mesh.data, cpy):
raise RuntimeError("These arrays should be different.")
mesh.load('temp.h5')
if not np.allclose(mesh.data, cpy):
raise RuntimeError("These arrays should be identical.")
# test save/load of meshvariable
var = uw.mesh.MeshVariable( mesh, nodeDofCount=2 )
for ind, coord in enumerate(mesh.data):
var.data[ind] = [coord[1]+5., coord[0]*-2.]
var.syncronise()
cpy = var.data.copy()
var.save('temp2.h5')
var.data[:] = 0.
if np.allclose(var.data, cpy):
raise RuntimeError("These arrays should be different.")
var.load('temp2.h5')
if not np.allclose(var.data, cpy):
if uw.mpi.rank==0:
print("VAR")
print(var.data[:])
print("CPY")
print(cpy)
raise RuntimeError("These arrays should be identical.")
if __name__ == '__main__':
import underworld as uw
uw.utils._io.PATTERN=1 # sequential
meshtest(16,True)
meshtest(8, True) # second run to make sure we're deleting datasets where different sizes
meshtest(16,False)
uw.utils._io.PATTERN=2 # collective
meshtest(16,True)
meshtest(8, True)
# meshtest(16,False) # this isn't a good idea, so we shouldn't do it.
if uw.mpi.rank==0:
import os
os.remove('temp.h5')
os.remove('temp2.h5')
```
#### File: docs/test/rt_timed.py
```python
import os
os.environ['UW_ENABLE_TIMING'] = '1'
import underworld as uw
from underworld import function as fn
import glucifer
import math
import numpy as np
from collections import defaultdict
from time import time
uw.timing.start()
timing_data = defaultdict(lambda: [0,0.])
def add_timing(name, time):
data = timing_data[name]
data[0] += 1
data[1] += time
import os
res = 16
RESKEY = "UW_RESOLUTION"
if RESKEY in os.environ:
res = int(os.environ[RESKEY])
ts = time()
mesh = uw.mesh.FeMesh_Cartesian(elementRes = (res, res, res),
minCoord = ( 0., 0., 0., ),
maxCoord = ( 1., 1., 1., ))
add_timing("FeMesh_Cartesian.__init__()", time()-ts)
ts = time()
velocityField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=3 )
add_timing("MeshVariable.__init__()", time()-ts)
ts = time()
pressureField = uw.mesh.MeshVariable( mesh=mesh.subMesh, nodeDofCount=1 )
add_timing("MeshVariable.__init__()", time()-ts)
ts = time()
temperatureField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
add_timing("MeshVariable.__init__()", time()-ts)
ts = time()
temperatureFieldDeriv = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
add_timing("MeshVariable.__init__()", time()-ts)
# initialise
velocityField.data[:] = [0.,0.,0.]
pressureField.data[:] = 0.
for index, coord in enumerate(mesh.data):
temperatureField.data[index] = coord[2]
temperatureFieldDeriv.data[:] = 0.
# Create a swarm.
ts = time()
swarm = uw.swarm.Swarm( mesh=mesh )
add_timing("Swarm.__init__()", time()-ts)
# Create a data variable. It will be used to store the material index of each particle.
materialIndex = swarm.add_variable( dataType="int", count=1 )
# Create a layout object, populate the swarm with particles.
swarmLayout = uw.swarm.layouts.PerCellSpaceFillerLayout( swarm=swarm, particlesPerCell=40 )
ts = time()
swarm.populate_using_layout( layout=swarmLayout )
add_timing("Swarm.populate_using_layout()", time()-ts)
# define these for convenience.
denseIndex = 0
lightIndex = 1
# material perturbation from van Keken et al. 1997
wavelength = 2.0
amplitude = 0.02
offset = 0.2
k = 2. * math.pi / wavelength
# Create function to return particle's coordinate
coord = fn.coord()
# Define the material perturbation, a function of the x coordinate (accessed by `coord[0]`).
perturbationFn = offset + amplitude*fn.math.cos( k*coord[0] )
# Setup the conditions list.
# If z is less than the perturbation, set to lightIndex.
conditions = [ ( perturbationFn > coord[1] , lightIndex ),
( True , denseIndex ) ]
# The swarm is passed as an argument to the evaluation, providing evaluation on each particle.
# Results are written to the materialIndex swarm variable.
fnc = fn.branching.conditional( conditions )
ts = time()
matdat = fnc.evaluate(swarm)
add_timing("Function.evaluate()", time()-ts)
materialIndex.data[:] = matdat
store = glucifer.Store('RT')
fig = glucifer.Figure( store, name="firstFig" )
fig.append( glucifer.objects.Points(swarm, materialIndex, pointSize=2, colourBar=False) )
fig.append( glucifer.objects.Surface(mesh, pressureField))
fig.append( glucifer.objects.VectorArrows( mesh, velocityField, scaling=1.0e2))
# Set a density of '0.' for light material, '1.' for dense material.
densityMap = { lightIndex:0., denseIndex:1. }
densityFn = fn.branching.map( fn_key = materialIndex, mapping = densityMap )
# Set a viscosity value of '1.' for both materials.
viscosityMap = { lightIndex:1., denseIndex:1. }
fn_viscosity = fn.branching.map( fn_key = materialIndex, mapping = viscosityMap )
# Define a vertical unit vector using a python tuple.
z_hat = ( 0., 0., 1. )
# Create buoyancy force vector
buoyancyFn = -densityFn*z_hat
# Construct node sets using the mesh specialSets
iWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
jWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
kWalls = mesh.specialSets["MinK_VertexSet"] + mesh.specialSets["MaxK_VertexSet"]
allWalls = iWalls + jWalls + kWalls
# Prescribe degrees of freedom on each node to be considered Dirichlet conditions.
# In the x direction on allWalls flag as Dirichlet
# In the y direction on jWalls (horizontal) flag as Dirichlet
stokesBC = uw.conditions.DirichletCondition( variable = velocityField,
indexSetsPerDof = (allWalls, allWalls, kWalls))
advdiffBc = uw.conditions.DirichletCondition( variable = temperatureField,
indexSetsPerDof = kWalls )
ts = time()
stokes = uw.systems.Stokes( velocityField = velocityField,
pressureField = pressureField,
# voronoi_swarm = swarm,
conditions = stokesBC,
fn_viscosity = fn_viscosity,
fn_bodyforce = buoyancyFn )
add_timing("Stokes.__init__()", time()-ts)
solver = uw.systems.Solver( stokes )
# Create a system to advect the swarm
advector = uw.systems.SwarmAdvector( swarm=swarm, velocityField=velocityField, order=2 )
# Create a dummy temperature field.
ts = time()
advdiff = uw.systems.AdvectionDiffusion(velocityField=velocityField, phiField=temperatureField, phiDotField=temperatureFieldDeriv,
fn_diffusivity=1.,conditions=advdiffBc)
add_timing("AdvectionDiffusion.__init__()", time()-ts)
# functions for calculating RMS velocity
vdotv = fn.math.dot(velocityField,velocityField)
ts = time()
v2sum_integral = uw.utils.Integral( mesh=mesh, fn=vdotv )
add_timing("Integral.__init__()", time()-ts)
ts = time()
volume_integral = uw.utils.Integral( mesh=mesh, fn=1. )
add_timing("Integral.__init__()", time()-ts)
# Get instantaneous Stokes solution
ts = time()
solver.solve()
add_timing("StokesSolver.solve()", time()-ts)
# Calculate the RMS velocity.
vrms = math.sqrt( v2sum_integral.evaluate()[0] )
# update
dt1 = advector.get_max_dt()
dt2 = advdiff.get_max_dt()
dt = min(dt1,dt2)
# Advect using this timestep size.
ts = time()
advector.integrate(dt)
add_timing("SwarmAdvector.integrate()", time()-ts)
ts = time()
advdiff.integrate(dt)
add_timing("AdvectionDiffusion.integrate()", time()-ts)
# Save things
ts = time()
meshFileHandle = mesh.save("Mesh.h5")
add_timing("FeMesh.save()", time()-ts)
uw.mpi.barrier()
if uw.mpi.rank == 0:
os.remove("Mesh.h5")
ts = time()
vFH = velocityField.save("velocityField.h5")
add_timing("MeshVariable.save()", time()-ts)
uw.mpi.barrier()
if uw.mpi.rank == 0:
os.remove("velocityField.h5")
ts = time()
swarmFileHandle = swarm.save("Swarm.h5")
add_timing("Swarm.save()", time()-ts)
# Timing for this guy is consistently out of tolerance (like 30%).
# It is a very fast call, so not concerned.
mH = materialIndex.save("materialIndex.h5")
uw.mpi.barrier()
if uw.mpi.rank == 0:
os.remove("Swarm.h5")
os.remove("materialIndex.h5")
ts = time()
fig.save()
add_timing("Figure.save()", time()-ts)
uw.mpi.barrier()
if uw.mpi.rank == 0:
os.remove("RT.gldb")
uw.timing.print_table(group_by="routine")
if uw.mpi.rank == 0:
import numpy as np
module_timing_data = uw.timing.get_data(group_by="routine")
for key in timing_data.keys():
valuescript = timing_data[key]
valuemod = module_timing_data[(key,1)]
if not np.isclose(valuescript[1],valuemod[1], rtol=0.15):
raise RuntimeError( "Timing for '{}' not within tolerance ( {}: {} ).".format(key,valuescript[1],valuemod[1]) )
# simple test for file output
fname = "timing_test.txt"
uw.timing.print_table(group_by="routine", output_file=fname)
if uw.mpi.rank == 0:
import os.path
exists = os.path.isfile(fname)
if not exists:
raise RuntimeError( "Timing output to file does not appear to have worked." )
os.remove(fname)
```
#### File: config/packages/libXML2.py
```python
import os
from config import Package
class libXML2(Package):
def gen_base_extensions(self):
for e in Package.gen_base_extensions(self):
yield e
yield ([os.path.join(i, 'libxml2') for i in e[0]], e[1])
def gen_envs(self, loc):
for env in Package.gen_envs(self, loc):
self.headers = [os.path.join('libxml', 'parser.h')]
if self.find_libraries(loc[2], 'xml2'):
env.PrependUnique(LIBS=['xml2'])
yield env
```
#### File: systems/sle/_assembledmatrix.py
```python
import underworld as uw
import underworld._stgermain as _stgermain
from . import _assembledvector
import libUnderworld
class AssembledMatrix(_stgermain.StgCompoundComponent):
"""
Matrix object, generally assembled as a result of the FEM
framework.
Parameters
----------
    rowVector: underworld.systems.sle.SolutionVector
        SolutionVector object for the matrix row.
    colVector: underworld.systems.sle.SolutionVector
        SolutionVector object for the matrix column.
"""
_objectsDict = { "_matrix": "StiffnessMatrix" }
_selfObjectName = "_matrix"
def __init__(self, rowVector, colVector, rhs=None, rhs_T=None, assembleOnNodes=False, **kwargs):
if not isinstance(rowVector, uw.systems.sle.SolutionVector):
raise TypeError("'rowVector' object passed in must be of type 'SolutionVector'")
if not isinstance(colVector, uw.systems.sle.SolutionVector):
raise TypeError("'colVector' object passed in must be of type 'SolutionVector'")
self._meshVariableRow = rowVector.meshVariable
self._meshVariableCol = colVector.meshVariable
if rhs and not isinstance(rhs, _assembledvector.AssembledVector):
raise TypeError("'rhs' object passed in must be of type 'AssembledVector'")
if rhs_T and not isinstance(rhs_T, _assembledvector.AssembledVector):
raise TypeError("'rhs_T' object passed in must be of type 'AssembledVector'")
self._rhs = rhs
self._rhs_T = rhs_T
if not isinstance( assembleOnNodes, bool ):
raise TypeError("'assembleOnNodes' must be of type 'bool'.")
self.assembleOnNodes = assembleOnNodes
# build parent
super(AssembledMatrix,self).__init__(**kwargs)
self._cself.rowEqNum = rowVector.eqNumber._cself
self._cself.colEqNum = colVector.eqNumber._cself
@property
def meshVariableRow(self):
return self._meshVariableRow
@property
def meshVariableCol(self):
return self._meshVariableCol
def _add_to_stg_dict(self,componentDictionary):
# call parents method
super(AssembledMatrix,self)._add_to_stg_dict(componentDictionary)
componentDictionary[ self._matrix.name ][ "RowVariable"] = self._meshVariableRow._cself.name
componentDictionary[ self._matrix.name ]["ColumnVariable"] = self._meshVariableCol._cself.name
componentDictionary[ self._matrix.name ]["dim"] = self._meshVariableCol._mesh.generator.dim
if self._rhs:
componentDictionary[ self._matrix.name ][ "RHS"] = self._rhs._cself.name
if self._rhs_T:
componentDictionary[ self._matrix.name ]["transposeRHS"] = self._rhs_T._cself.name
if self.assembleOnNodes == False:
componentDictionary[ self._matrix.name ]["assembleOnNodes"] = "False"
else:
componentDictionary[ self._matrix.name ]["assembleOnNodes"] = "True"
# def _setup(self):
# # add terms to vector
# for term in self._assemblyTerms:
# term._cself.stiffnessMatrix = self._cself
# libUnderworld.StgFEM.StiffnessMatrix_AddStiffnessMatrixTerm( self._cself, term._cself )
#
# def AddTerm(self, assemblyTerm):
# self._assemblyTerms.append(assemblyTerm)
# assemblyTerm._cself.stiffnessMatrix = self._cself
# libUnderworld.StgFEM.StiffnessMatrix_AddStiffnessMatrixTerm(self._cself, assemblyTerm._cself)
``` |
{
"source": "jmantas/ethereum-tools",
"score": 3
} |
#### File: jmantas/ethereum-tools/eth-node-info-cli.py
```python
from ethtoollib.eth_rpc import *
from ethtoollib.eth_util import *
import argparse
def main():
''' Main function '''
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("--host", dest="jrpc_host")
parser.add_argument("--port", dest="jrpc_port")
parser.add_argument("--getversion", action="store_true")
parser.add_argument("--getprotocol", action="store_true")
parser.add_argument("--getnetworkid", action="store_true")
parser.add_argument("--islistening", action="store_true")
parser.add_argument("--getpeercount", action="store_true")
parser.add_argument("--ismining", action="store_true")
parser.add_argument("--getcoinbase", action="store_true")
parser.add_argument("--gethashrate", action="store_true")
parser.add_argument("--getgasprice", action="store_true")
parser.add_argument("--getaccounts", action="store_true")
parser.add_argument("--getblocknumber", action="store_true")
parser.add_argument("--getbalance", dest="getaccount_balance", type=str)
parser.add_argument("--gettxcount", dest="getaccount_txcount", type=str)
parser.add_argument("--getstorage", dest="getaccount_storage", type=str)
parser.add_argument("--getblockbyhash", dest="getblock_by_hash", type=str)
parser.add_argument("--getlatestblockinfo", action="store_true")
parser.add_argument("--getcode", dest="getcode_from_address", type=str)
args = parser.parse_args()
eth_instance = EthRPC(args.jrpc_host, args.jrpc_port)
if args.getversion:
print eth_instance.node_version()
if args.getprotocol:
print eth_instance.protocol_version()
if args.getnetworkid:
print eth_instance.network_id()
if args.islistening:
print eth_instance.network_is_listening()
if args.getpeercount:
print eth_instance.network_peer_count()
if args.ismining:
print eth_instance.node_is_mining()
if args.getcoinbase:
print eth_instance.coinbase()
if args.gethashrate:
print eth_instance.hashrate()
if args.getgasprice:
print eth_instance.gas_price()
if args.getaccounts:
accounts_json = eth_instance.accounts()
print json_to_yamler(accounts_json)
if args.getblocknumber:
print eth_instance.current_block_number()
if args.getaccount_balance:
acc = args.getaccount_balance
balance = eth_instance.balance(acc)
print balance
if args.getaccount_txcount:
acc = args.getaccount_txcount
txcount = eth_instance.transaction_count(acc)
print txcount
if args.getaccount_storage:
acc = args.getaccount_storage
storage = eth_instance.storage_at(acc)
print acc, storage
if args.getblock_by_hash:
block_hash = args.getblock_by_hash
info = eth_instance.block_info_by_hash(block_hash)
print info
if args.getlatestblockinfo:
block_number = "latest"
latestblockinfo_json = eth_instance.block_info_by_number(block_number)
print json_to_yamler(latestblockinfo_json)
if args.getcode_from_address:
address = args.getcode_from_address
info = eth_instance.code_from_address_hash(address)
print info
if __name__ == "__main__":
main()
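# Example invocations (a hedged sketch, not from the original repo; assumes an
# Ethereum node exposing JSON-RPC, e.g. geth/parity on port 8545 -- adjust
# host, port and addresses to your own setup):
#   python eth-node-info-cli.py --host 127.0.0.1 --port 8545 --getversion
#   python eth-node-info-cli.py --host 127.0.0.1 --port 8545 --getpeercount --ismining
#   python eth-node-info-cli.py --host 127.0.0.1 --port 8545 --getlatestblockinfo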
```
#### File: ethereum-tools/ethtoollib/eth_rpc.py
```python
import json
import requests
class EthRPC(object):
''' eth connection initialization '''
json_id = 0
def __init__(self, host=None, port=None):
self.json_rpc_host = host
self.json_rpc_port = port
self.scheme = 'http'
self.session = requests.session()
def json_id_index(self):
''' Iterator '''
self.json_id += 1
return self.json_id
def init_rpc_request(self, rpc_method, json_parameters=None):
''' JSON-RPC connector '''
json_data = json.dumps({
"jsonrpc": "2.0",
"method": rpc_method,
"params": json_parameters,
"id": self.json_id_index(),
})
url = '{http}://{host}:{port}/'.format(
http=self.scheme,
host=self.json_rpc_host,
port=self.json_rpc_port
)
json_output = self.session.post(url, data=json_data).json()
if json_output and 'error' in json_output:
raise ValueError(json_output)
return json_output
def node_version(self):
''' get node version '''
rpc_request_output = self.init_rpc_request(
"web3_clientVersion")
return rpc_request_output['result']
def protocol_version(self):
''' get protocol version '''
rpc_request_output = self.init_rpc_request(
"eth_protocolVersion")
return rpc_request_output['result']
def network_id(self):
''' get network ID '''
rpc_request_output = self.init_rpc_request(
"net_version")
return rpc_request_output['result']
def network_is_listening(self):
''' listen status '''
rpc_request_output = self.init_rpc_request(
"net_listening")
return rpc_request_output['result']
def network_peer_count(self):
''' get peer count '''
rpc_request_output = self.init_rpc_request(
"net_peerCount")
return int(rpc_request_output['result'], 16)
def blockchain_syncing(self):
''' gets blockchains sync status '''
rpc_request_output = self.init_rpc_request("eth_isSyncing")
return rpc_request_output['result']
def node_is_mining(self):
''' mining status '''
rpc_request_output = self.init_rpc_request(
"eth_mining")
return rpc_request_output['result']
def coinbase(self):
''' get coinbase '''
rpc_request_output = self.init_rpc_request(
"eth_coinbase")
return rpc_request_output['result']
def hashrate(self):
        ''' get hashrate '''
rpc_request_output = self.init_rpc_request(
"eth_hashrate")
return int(rpc_request_output['result'], 16)
def gas_price(self):
''' get gas price '''
rpc_request_output = self.init_rpc_request(
"eth_gasPrice")
return int(rpc_request_output['result'], 16)
def accounts(self):
''' get list of accounts '''
rpc_request_output = self.init_rpc_request(
"eth_accounts")
return rpc_request_output['result']
def current_block_number(self):
''' get latest block number '''
rpc_request_output = self.init_rpc_request(
"eth_blockNumber")
return int(rpc_request_output['result'], 16)
def balance(self, account, block_number="latest"):
''' get balance for account hash,
by default at latest synced block number '''
rpc_request_output = self.init_rpc_request(
"eth_getBalance", [account, block_number])
return int(rpc_request_output['result'], 16)
def transaction_count(self, account, block_number="latest"):
''' get transaction count for account hash '''
rpc_request_output = self.init_rpc_request(
"eth_getTransactionCount", [account, block_number])
return int(rpc_request_output['result'], 16)
def storage_at(self, account, position="0x0", block_number="latest"):
''' returns storage at account latest block in position 0x0 '''
rpc_request_output = self.init_rpc_request(
"eth_getStorageAt", [account, position, block_number])
return rpc_request_output['result']
def transaction_count_in_block(self, block_hash):
''' returns transcation count in a block(hash) '''
rpc_request_output = self.init_rpc_request(
"eth_getBlockTransactionCountByHash", [block_hash])
return int(rpc_request_output['result'], 16)
def uncle_count_from_block_hash(self, block_hash):
''' uncle count from a block(hash) '''
rpc_request_output = self.init_rpc_request(
"eth_getUncleCountByBlockHash", [block_hash])
return int(rpc_request_output['result'], 16)
def uncle_count_from_block_number(self, block_number):
''' uncle count from a block(number) '''
rpc_request_output = self.init_rpc_request(
"eth_getUncleCountByBlockNumber", [block_number])
return int(rpc_request_output['result'], 16)
def code_from_address_hash(self, address, block_number="latest"):
''' returns contract code from address '''
rpc_request_output = self.init_rpc_request(
"eth_getCode", [address, block_number])
return rpc_request_output['result']
def sign_data(self, address, data):
''' sign data '''
rpc_request_output = self.init_rpc_request("eth_sign", [address, data])
return rpc_request_output['result']
def send_transaction(self, from_address=None, to_address=None, gas=None,
gas_price=None, value=None, data=None, nonce=None):
''' send transaction '''
transaction_params = {
'from': from_address,
'to': to_address if to_address else None,
'gas': gas,
'gasPrice': gas_price,
'value': value if value else None,
'data': data if data else None
}
rpc_request_output = self.init_rpc_request(
"eth_sendTransaction", [transaction_params])
return rpc_request_output['result']
def transaction_receipt(self, transaction_hash):
''' returns transaction receipt '''
rpc_request_output = self.init_rpc_request(
"eth_getTransactionReceipt", [transaction_hash])
return rpc_request_output['result']
def eth_call(self, from_address=None, to_address=None, gas=None, gas_price=None, value=None, data=None):
''' calls contract '''
transaction_params = {
'from': from_address,
'to': to_address if to_address else None,
'gas': gas,
'gasPrice': gas_price,
'value': value if value else None,
'data': data if data else None
}
rpc_request_output = self.init_rpc_request(
"eth_call", [transaction_params])
return rpc_request_output['result']
def eth_estimate_gas(self, from_address=None, to_address=None,
gas=None, gas_price=None, value=None, data=None):
''' estimates gas per transaction by creating dummy tx which does
not get into blockchain '''
transaction_params = {
'from': from_address if from_address else None,
'to': to_address if to_address else None,
'gas': gas if gas else None,
'gasPrice': gas_price if gas_price else None,
'value': value if value else None,
'data': data if data else None
}
rpc_request_output = self.init_rpc_request(
"eth_estimateGas", [transaction_params])
return rpc_request_output['result']
def compile_solidity(self, contract_source_code):
''' compiles with solidity by RPC request '''
rpc_request_output = self.init_rpc_request(
"eth_compileSolidity", [contract_source_code])
return rpc_request_output['result']
def block_info_by_hash(self, block_hash, full_bool=True):
rpc_request_output = self.init_rpc_request(
"eth_getBlockByHash", [block_hash, full_bool])
return rpc_request_output['result']
def block_info_by_number(self, block_number, full_bool=True):
rpc_request_output = self.init_rpc_request(
"eth_getBlockByNumber", [block_number, full_bool])
return rpc_request_output['result']
def transaction_info_by_hash(self, transaction_hash):
rpc_request_output = self.init_rpc_request(
"eth_getTransactionByHash", [transaction_hash])
return rpc_request_output['result']
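# Minimal usage sketch (hedged; not part of the original module). Assumes a
# node whose JSON-RPC interface is reachable on 127.0.0.1:8545 -- the host,
# port and queried values below are placeholders, not project defaults.
if __name__ == "__main__":
    node = EthRPC(host="127.0.0.1", port="8545")
    print(node.node_version())          # client/version string
    print(node.current_block_number())  # latest synced block as an int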
``` |
{
"source": "jmantas/ipfs-nodes-crawler",
"score": 3
} |
#### File: jmantas/ipfs-nodes-crawler/ipfs-nodes-crawler.py
```python
from util.pinger import *
from util.util import *
from geoip import geolite2
import sys
import json
import logging
import ipaddress
import subprocess
import pymongo
def main():
"""
The main heartbeat
"""
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%Y%m%d %H%M%S',
filename='crawler.log',
level=logging.DEBUG)
crawler()
def ipfs_diag_net():
"""
Gets raw output from:
ipfs diag net
"""
return subprocess.check_output(["ipfs", "diag", "net"])
def get_nodes_ids(ipfs_diag_net_out):
"""
Parsing nodes IDs
"""
node_ids_set = set()
for line in ipfs_diag_net_out.split("\n"):
line = line.strip()
if line.startswith("ID"):
line = line.strip().split(" ")[1]
node_ids_set.add(line)
return node_ids_set
def crawler():
"""
From 'id <id>'
subprocess.check_output(["ipfs", "id", _id])
"""
logging.info("Running \'ipfs diag net\'")
ipfs_diag_net_output=ipfs_diag_net()
logging.info("Getting nodes IDs")
nodes_ids_set = get_nodes_ids(ipfs_diag_net_output)
logging.info("Found %s IDs", len(nodes_ids_set))
mongo_client = pymongo.MongoClient()
ipfs_db = mongo_client.ipfs.id2ip
for _id in nodes_ids_set:
ips_set = set()
nodes_info_dict = dict()
geolocation_list = list()
try:
logging.info("Getting node info with \'ipfs id %s\'", _id)
#todo: multithreading
id_str = subprocess.check_output(["ipfs", "id", _id])
id_json = json.loads(id_str)
addresses = id_json["Addresses"]
if isinstance(addresses, list):
addresses_set = address_list2address_set(addresses)
logging.info("Iterating through IPs %s", addresses_set)
for ip in addresses_set:
logging.info("Checking IP %s ", ip)
if not ipaddress.ip_address(unicode(ip)).is_private:
ips_set.add(ip)
else:
                logging.info("Did not get info from %s. Probably \'null\' address list.", _id)
nodes_info_dict = ({_id:ips_set})
geolocation_list = geolocation(nodes_info_dict[_id])
if geolocation_list:
geolocation_to_mdb(geolocation_list, _id, nodes_info_dict[_id],
id_json["AgentVersion"], id_json["ProtocolVersion"],
id_json["PublicKey"], ipfs_db)
except:
error = sys.exc_info()
logging.error("Error processing node %s: %s", _id, error)
def geolocation(ips_set):
"""
Geolocation function
"""
geolocation_list = list()
for node_ip in ips_set:
logging.info("Getting geolocation object for external IP %s", node_ip)
match = geolite2.lookup(node_ip)
if match is not None:
geolocation_list.append(match)
return geolocation_list
def geolocation_to_mdb(geolocation_list, node_id, ips_set, agent_version,
protocol_version, public_key, ipfs_db):
"""
    Upsert location, IP, country and other node info into MongoDB
"""
for node in geolocation_list:
logging.info("Writing data to mongoDB for %s", node.ip)
document = {"node_id":node_id,
# "ips_set":str(ips_set),
"ip":node.ip,
"agent_version":agent_version,
"protocol_version":protocol_version,
"public_key":public_key,
"country":node.country,
"continent":node.continent,
"subdivisions":str(node.subdivisions),
"timezone":node.timezone,
"location":node.location}
ipfs_db.replace_one(document, document, upsert=True)
if __name__ == "__main__":
main()
``` |
{
"source": "JMante1/Excel-to-SBOL",
"score": 3
} |
#### File: excel2sbol/excel2sbol/initialise_functions.py
```python
import os
import json
import pandas as pd
import excel2sbol.column_functions as cf
class table:
"""Used to go from a regular dictionary to a dictionary of column objects
"""
def __init__(self, table_doc_path, column_read_dict):
"""[summary]
Args:
table_doc_path (str): Full path to the input sheet
E.g. 'C:/users/user/filled.xlsx'
column_read_dict (dict): Dictionary of columns each with
a dictionary. It takes the form:
{column_name1: {'SBOL Term': 'sbol_term',
'Namespace URL': 'nm_url',
'Sheet Lookup': 'TRUE',
'Replacement Lookup': 'TRUE',
'Sheet Name': 'Replacement',
'From Col': 'A', 'To Col': 'B'},
column_name2: {'SBOL Term': 'sbol_term',
'Namespace URL': 'nm_url',
'Sheet Lookup': 'TRUE',
'Replacement Lookup': 'TRUE',
'Sheet Name': 'Replacement',
'From Col': 'A', 'To Col': 'B'},
}
Raises:
TypeError: If column_read_dict is not a dictionary a TypeError
is raised.
"""
if not isinstance(column_read_dict, dict):
raise TypeError
self.column_list = {}
for key, value in column_read_dict.items():
self.column_list[key] = cf.column(table_doc_path, value)
def read_in_sheet(templt_name, file_path_in):
"""This reads in an excel file and creates a series of dictionaries
containing the relevant information based on the templt used.
Args:
templt_name (string): The name of the templt being used. It must be
            one of the names found in the file template_constants.txt
e.g. "darpa_templt_blank_v006_20210405.xlsx"
file_path_in (string): The full filepath to the excel spreadsheet that
needs to be read in.
E.g. "C:\\Users\\Tester\\Downloads\\MyNiceLibrary.xlsx"
Returns:
column_read_dict (dictionary): The table of parts from
excel in the format
{
part_name1: {col_nm1:col_val1,
col_nm2:col_val2},
part_name2: {col_nm1:col_val1,
col_nm2:col_val2}
}
        sheet_dict (dictionary): The column handling sheet read in as a
dictionary of the format
{
col_nm1: {"SBOL Term":value1,
"Namespace URL":value2,
"Sheet Lookup":value3,
"Replacement Lookup":value4,
"Sheet Name":value5,
"From Col":value6,
"To Col":value7},
col_nm2: {"SBOL Term":value1,
"Namespace URL":value2,
"Sheet Lookup":value3,
"Replacement Lookup":value4,
"Sheet Name":value5,
"From Col":value6,
"To Col":value7}
}
description_info (string): The library description from the
'Design Description' box
collection_info (dictionary): The collection information
as a library of the format:
{
"Collection Name":value1,
"Institution to Build":value2,
"Date Created":value3,
"Date Last Updated":value4,
"Authors":value5,
"Date Accepted":value6,
"Person Accepting":value7,
"SynBioHub Collection":value8
}
"""
file_dir = os.path.dirname(__file__)
with open(os.path.join(file_dir,
'template_constants.txt')) as f:
templt_dict = json.loads(f.read())
# pull values from templt dict
start_row = templt_dict[templt_name]["library_start_row"]
sheet_name = templt_dict[templt_name]["sheet_name"]
collection_rows = templt_dict[templt_name]["number_of_collection_rows"]
description_start_row = templt_dict[templt_name]["description_start_row"]
collection_cols = templt_dict[templt_name]["collection_columns"]
description_cols = templt_dict[templt_name]["description_columns"]
# pull in collection info
collection_info = pd.read_excel(file_path_in, sheet_name=sheet_name,
header=None, nrows=collection_rows,
usecols=collection_cols,
index_col=0, engine='openpyxl').to_dict('index')
description_info = pd.read_excel(file_path_in, sheet_name=sheet_name,
header=None,
skiprows=description_start_row, nrows=1,
usecols=description_cols, engine='openpyxl').iloc[0, 0]
# read in the body of the sheet
sheet_read = pd.read_excel(file_path_in, sheet_name=sheet_name, header=0,
skiprows=start_row, engine='openpyxl').fillna("")
sheet_dict = sheet_read.to_dict('index')
# pull in the column definitions from the excel sheet
column_read_dict = pd.read_excel(file_path_in,
sheet_name="column_definitions", header=0,
index_col=0, engine='openpyxl')
column_read_dict = column_read_dict.to_dict('index')
return (column_read_dict, sheet_dict, description_info, collection_info)
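# Hedged usage sketch (not part of the original module). The template name and
# spreadsheet path are the examples from the docstring above; both are
# placeholders for a real filled-in template.
if __name__ == "__main__":
    cols, rows, description, collection = read_in_sheet(
        "darpa_templt_blank_v006_20210405.xlsx",
        "C:\\Users\\Tester\\Downloads\\MyNiceLibrary.xlsx",
    )
    parts_table = table("C:\\Users\\Tester\\Downloads\\MyNiceLibrary.xlsx", cols)
    print(description)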
``` |
{
"source": "JMante1/Plugin-Curation-Test",
"score": 3
} |
#### File: JMante1/Plugin-Curation-Test/form_output_test.py
```python
from flask import Flask, request
import os
app = Flask(__name__)
@app.route("/status")
def status():
return("The Form Output Server is up and running")
@app.route("/form_output", methods=["POST"])
def form_output():
data = request.form
data = dict(data.lists())
cwd = os.getcwd()
filename = os.path.join(cwd, "form_return.txt")
with open(filename, 'w') as f:
f.write(str(data))
return("All is well")
```
#### File: Plugin-Curation-Test/tests/conftest.py
```python
from app import app
import pytest
import json
@pytest.fixture
def client():
return app.test_client()
@pytest.fixture
def eval_dict():
eval_dict = {"type": "Component"}
eval_json = json.dumps(eval_dict)
return eval_json
@pytest.fixture
def run_dict():
run_dict = {"complete_sbol": "https://dev.synbiohub.org/public/igem/BBa_E0040/1/sbol",
"shallow_sbol": "https://dev.synbiohub.org/public/igem/BBa_E0040/1/sbolnr",
"genbank": "https://dev.synbiohub.org/public/igem/BBa_E0040/1/gb",
"top_level": "https://synbiohub.org/public/igem/BBa_E0040/1",
"size": 5,
"type": "Component",
"instanceUrl": "https://dev.synbiohub.org/"
}
run_json = json.dumps(run_dict)
return run_json
``` |
{
"source": "jmanteau/aio-ipfabric",
"score": 2
} |
#### File: aio-ipfabric/aioipfabric/api.py
```python
from typing import Optional
from dataclasses import dataclass
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from httpx import AsyncClient
# -----------------------------------------------------------------------------
# Exports
# -----------------------------------------------------------------------------
__all__ = ["IPFSession"]
# -----------------------------------------------------------------------------
#
# CODE BEGINS
#
# -----------------------------------------------------------------------------
@dataclass
class URIs:
    """ identifies API URL endpoints used"""
login = "auth/login"
token_refresh = "auth/token"
class IPFSession(AsyncClient):
"""
The IPFSession instance is the asyncio base client used to interact with the
IP Fabric API via REST calls. The primary feature of the IPFSession class
is to handle the authentication via login credentials and tokens.
    An instance of this class will be created by the IPFBaseClient, which in
    turn makes the API accessible to any IPFabricClient instances.
"""
def __init__(self, base_url, token=None, username=None, password=None):
"""
Initialize the asyncio client session to the IP Fabric API
Parameters
----------
base_url: str
The base URL of the IP fabric system
token: str
The refresh token
username: str
The login user-name
password: str
The login password
"""
super().__init__(base_url=base_url, verify=False)
self.__refresh_token = token
self.__access_token = None
if all((username, password)):
self.__init_auth = self.__auth_userpass(
username=username, password=password
)
elif token:
self.__init_auth = self.refresh_token(token)
else:
raise RuntimeError("MISSING required token or (username, password)")
self.headers["Content-Type"] = "application/json"
# -------------------------------------------------------------------------
#
# Properties
#
# -------------------------------------------------------------------------
@property
def token(self):
""" return the Refresh Token for later use/storage """
return self.__refresh_token
# -------------------------------------------------------------------------
#
# Public Methods
#
# -------------------------------------------------------------------------
async def authenticate(self):
"""
This coroutine is used to authenticate to the IPF server and obtain an access
token. This coroutine can be used for both the initial login process as well
as the token refresh process.
"""
# the first time this method is called use the coroutine as selected in
# the __init__ method based on the provided credentials. Any subsequent
# call to `authenticate` will use `refresh_token`. The code below uses
        # the try/except catching the RuntimeError to detect the "first use"
# vs. subsequent uses.
try:
await self.__init_auth
except RuntimeError:
await self.refresh_token(self.__refresh_token)
async def refresh_token(self, token: Optional[str] = None):
""" using the refresh token, obtain a new access token """
if token:
self.__refresh_token = token
assert self.__refresh_token is not None
await self.__refresh_access_token(self.__refresh_token)
self.headers["Authorization"] = f"Bearer {self.__access_token}"
# -------------------------------------------------------------------------
#
# Private Methods
#
# -------------------------------------------------------------------------
async def __refresh_access_token(self, refresh_token):
""" underlying API call to update the access token """
res = await self.post(URIs.token_refresh, json={"refreshToken": refresh_token})
res.raise_for_status()
body = res.json()
self.__access_token = body["accessToken"]
async def __auth_userpass(self, username, password):
""" underlying API to call to authenticate using login credentials """
res = await self.post(
URIs.login, json={"username": username, "password": password}
)
res.raise_for_status()
body = res.json()
self.__access_token = body["accessToken"]
self.__refresh_token = body["refreshToken"]
self.headers["Authorization"] = f"Bearer {self.__access_token}"
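# Hedged usage sketch (not part of the original module). The base URL and
# credentials are placeholders for a reachable IP Fabric instance.
if __name__ == "__main__":  # pragma: no cover
    import asyncio

    async def _demo():
        ipf = IPFSession(
            base_url="https://ipfabric.example.com/api/v1/",
            username="admin",
            password="secret",
        )
        await ipf.authenticate()  # login, obtain access + refresh tokens
        print(ipf.token)          # refresh token can be persisted for reuse
        await ipf.aclose()        # close the underlying httpx session

    asyncio.run(_demo())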
``` |
{
"source": "jmanteau/evpn-cicd-arista-containerlab",
"score": 2
} |
#### File: evpn-cicd-arista-containerlab/bin/sshconfig.py
```python
from os.path import expanduser
import copy
class SshConfig:
"""
SSH configuration. An SshConfig consists of multiple SshConfigEntries, with
at least one general entry applying to all hosts that could not be matched
to other entries.
"""
def __init__(self, default=None, **hosts):
""" Create an SshConfig.
@param default: The default values that must be used. Defaults to None.
@keyword hosts: Host entries in the form of host=options where options
is a SshConfigEntry. If options is None, the host will be ignored.
"""
self.__entries = {}
if not default is None:
self.set(None, default)
for h, e in hosts.items():
if not e is None:
self.set(h, e)
def get(self, host, option=None):
""" Get the entry for the host. If the host does not have an entry,
None is returned. To get the default entry, use None as a value.
If the option is supplied, this is equivalent to
get(host).get(option)
@param host: The hostname to look for.
@param option: The option to look for.
"""
host_name = "ssh_%s" % host
if host is None:
host_name = "default"
if host_name not in self.__entries:
return None
if option is None:
return self.__entries[host_name]
else:
return self.__entries[host_name].get(option)
def set(self, host, entry=None, **options):
""" Set the entry options for a specific host and create an entry if
there is none yet. If this is a new host, it will have a priority equal
to the number of entries. This can be changed with the set_priority()
method of SshConfigEntry.
@param host: The host to set the entry for.
@param entry: The entry to set. Either a dict-like object with the
options as keys or an SshConfigEntry.
@keyword options: SSH options and a corresponding value in the form of
option=value. If value is None, the option is ignored.
"""
host_name = "ssh_%s" % host
if host is None:
host_name = "default"
if (entry is None
and len([1 for k in options
if (not options[k] is None
or len(options[k]) == 0)]) == 0):
# Nothing to do, just exit
return
e = SshConfigEntry(len(self.__entries))
if not entry is None:
e.set(entry)
if len(options) > 0:
e.set(**options)
# Guarantee that an entry will have entries
if len(e) > 0:
if host not in self or (host is None and "default" not in
self.__entries):
self.__entries[host_name] = e
else:
self.get(host).set(e)
def __delitem__(self, host):
""" Remove an entire host entry
@param host: the host entry to remove.
"""
if host in self:
del self.__entries["ssh_%s" % host]
elif host is None:
del self.__entries["default"]
else:
raise KeyError(host)
def remove(self, host, *options):
""" Remove a host entry or specific options in a host entry.
@param host: the host for which to remove the entire entry
@param options: the name of the options to be removed so that it does
not exist afterwards. It need not exist beforehand. If no options
are supplied, the host entry is removed.
"""
if len(options) == 0:
del self[host]
else:
entry = self.get(host)
entry.remove(*options)
if len(entry) == 0:
self.remove(host)
def __contains__(self, host):
""" If we have an entry for the specified host
@param host: The host to check for.
"""
if host is None:
return False
host_name = "ssh_%s" % host
return host_name in self.__entries
def hosts(self):
""" Return all the hostnames """
return [x.partition("ssh_")[2] for x in self.__entries.keys() if
x.find("ssh_", 0, 4) >= 0]
def save(self, dest):
""" Save the configuration somewhere safe.
@param dest: A filename or a file-like object. If the file already
exists, it will be overwritten.
"""
if (isinstance(dest, file)):
dest.write(str(self))
elif isinstance(dest, str):
f = open(dest, "w")
f.write(str(self))
f.close()
else:
raise TypeError("Argument is not a file or str")
def load(self, config):
""" Load a configuration.
@param config: A configuration to load. Must be a file-like object or a
filename.
"""
cfg = load_sshconfig(config)
hosts = [None]
hosts.extend(cfg.hosts())
for h in hosts:
self.set(h, cfg.get(h))
def __repr__(self):
""" Representative string. Will encode a SshConfig as
SshConfig(host=entry, ...). host will be the name for the host entry
and entry will be encoded as SshConfigEntry.
"""
rep = "SshConfig("
entries = []
for k, v in self.__entries.items():
if v is None:
continue
if k == "default":
entries.append(repr(v))
else:
entries.append("%s = %s" % (k.partition('ssh_')[2], repr(v)))
rep += ", ".join(entries)
rep += ")"
return rep
def __str__(self):
""" Gives the ssh_config represenation of the entry. """
lines = []
sortfunc = lambda t: t[1].priority()
for h, e in sorted(self.__entries.items(), key=sortfunc):
opts = str(e)
if not h == "default":
lines.append("Host %s" % h.partition("ssh_")[2])
opts = "\n".join([" %s" % s for s in opts.split("\n")])
lines.append(opts)
return "\n".join(lines)
class SshConfigEntry:
""" A collection of SSH options pertaining to a group of hosts """
def __add_to_opts(self, ddict=None, llist=None, ttuple=None):
try:
k = self.__options
del k
except AttributeError:
self.__options = {}
if llist is not None and len(llist) > 0:
for t in llist:
if not (t[1] is None or t[0] is None):
self.__options[str(t[0])] = t[1]
if ddict is not None and len(ddict) > 0:
for o, v in ddict.items():
if not (o is None or v is None):
self.__options[o] = v
if ttuple is not None and len(ttuple) >= 2:
if not (ttuple[0] is None or ttuple[1] is None):
self.__options[ttuple[0]] = ttuple[1]
def __init__(self, priority, entry=None, **options):
""" Create an SshConfigEntry.
@param priority: The priority for this entry.
@param entry: The contents of the entry. Can be either another
SshConfigEntry or a dict-like object.
@keyword options: Options in the form of option=value where value is
the value for the option. If value is None, option is ignored.
"""
self.__options = {}
self.__priority = priority
if not entry is None:
if isinstance(entry, SshConfigEntry):
opts = entry.items()
self.__add_to_opts(ddict=opts)
elif isinstance(entry, dict):
self.__add_to_opts(ddict=entry)
else:
err = "SshConfigEntry(entry): entry is not"
err += " of type SshConfigEntry or dict"
raise TypeError(err)
if len(options) > 0:
self.__add_to_opts(ddict=options)
def priority(self):
""" Get the priority of this host entry. This is used for ordering in
the eventual ssh_config.
"""
return self.__priority
def set_priority(self, priority):
""" Set the priority of the entry. If None is supplied, nothing
happens.
@param priority: The new priority. A value of None will have no effect
"""
if priority is None:
return
else:
self.__priority = int(priority)
def get(self, option):
""" Get the value for a specific option.
@param option: A valid SSH option. If it does not exist, None is
returned.
"""
try:
return self.__options[option]
except KeyError:
return None
def set(self, option=None, value=None, **options):
""" Set the value for a specific option. Options with a name or value
of None will be ignored.
@param option: An SshConfigEntry or a dict-like object with SSH
options as keys.
@param option: A valid SSH option name
@param value: Value for the option
@keyword options: Options in the form of option=value where
value is the value for option. If value is None, option is
ignored.
"""
if not option is None and value is None:
if isinstance(option, SshConfigEntry):
self.__add_to_opts(ddict=option.__options)
self.set_priority(option.priority())
elif isinstance(option, dict):
self.__add_to_opts(ddict=option)
else:
pass
elif not option is None and not value is None:
self.__add_to_opts(ttuple=(option, value))
if len(options) > 0:
self.__add_to_opts(ddict=options)
def remove(self, option, *options):
""" Remove the specified entries.
@param option: The option to remove. It will not exist afterwards. It
need not exist beforehand.
@param options: The additional options to remove (optional).
"""
opts = [option]
opts.extend(options)
for opt in opts:
try:
del self[opt]
except KeyError:
pass
def __delitem__(self, option):
""" Remove the specified option.
@param option: the option to remove.
@raise KeyError: when the option does not exist.
"""
if option in self:
del self.__options[option]
else:
raise KeyError(option)
def __contains__(self, option):
""" Whether the SshConfigEntry contains the specified option
@param option: A valid SSH option
"""
return option in self.__options
def __len__(self):
""" Return the number of defined options """
return len(self.__options)
def to_dict(self):
""" Converts the SshConfigEntry to a dict. """
l = {}
l.update(self.__options)
return l
def items(self):
""" Return the options that have a value. """
return [x for x in self.__options.items() if not x[1] is None]
def options(self):
""" Return all option names. """
l = []
l.extend([str(x[0]) for x in self.items()])
return l
def __repr__(self):
""" Representative string. Will encode as
SshConfigEntry(priority, optionN=valueN, ...). """
rep = "SshConfigEntry(%d" % self.priority()
entries = []
for k, v in self.__options.items():
if v is None:
continue
entries.append("%s = \"%s\"" % (k, v))
if len(entries) > 0:
rep += ", "
rep += ", ".join(entries)
rep += ")"
return rep
def __str__(self):
""" String representation resulting in ssh_config-like formatting. """
lines = []
for k, v in self.__options.items():
if v is None:
continue
lines.append("%s %s" % (k, v))
return "\n".join(lines)
def load_sshconfig(config):
""" Parses a ssh_config to an SshConfig
@param config: A filename or a file-like object.
"""
cfgfile = []
if isinstance(config, str):
k = open(config, 'r')
cfgfile = k.readlines()
k.close()
elif isinstance(config, file):
cfgfile = config.readlines()
else:
raise TypeError("config is not a string or file")
ssh_cfg = SshConfig()
host_name = None
host_entry = SshConfigEntry(0)
priority = 0
for line in cfgfile:
line = line.strip().split('#')[0]
option = line.split(' ')[0]
value = " ".join(line.strip().split(' ')[1:])
if len(option) == 0:
# we have a comment!
continue
elif option == "Host":
ssh_cfg.set(host_name, host_entry)
priority += 1
host_name = value
host_entry = SshConfigEntry(priority)
else:
host_entry.set(option, value)
ssh_cfg.set(host_name, host_entry)
return ssh_cfg
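# Hedged usage sketch (not part of the original module); host names and option
# values below are purely illustrative.
if __name__ == "__main__":
    cfg = SshConfig()
    cfg.set(None, User="git")  # default options applied to every host
    cfg.set("example.com", HostName="192.0.2.10", Port="2222")
    print(cfg)  # ssh_config-style text, default entry first
    # cfg.save(expanduser("~/.ssh/config_generated"))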
```
#### File: evpn-cicd-arista-containerlab/netbox-interact/netbox-extract.py
```python
from pprint import pprint
from collections import defaultdict
import yaml
import requests
import pynetbox
def get_netbox():
"""
Return Netbox API handler
Returns:
pynetbox.API -- Netbox API handler
"""
nburl = "http://1192.168.127.12:8000/"
NETBOX_TOKEN = "<KEY>"
session = requests.Session()
session.verify = False # https://pynetbox.readthedocs.io/en/latest/advanced.html#ssl-verification
nb = pynetbox.api(url=nburl, token=NETBOX_TOKEN, threading=True)
nb.http_session = session
return nb
nb = get_netbox()
def ddict():
return defaultdict(ddict)
def ddict2dict(d):
for k, v in d.items():
if isinstance(v, dict):
d[k] = ddict2dict(v)
return dict(d)
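# Illustration (added note): ddict() allows nested assignment without
# pre-creating intermediate dicts, and ddict2dict() turns the result back into
# plain dicts so yaml.dump renders it cleanly, e.g.:
#   d = ddict(); d["a"]["b"]["c"] = 1
#   ddict2dict(d)  ->  {'a': {'b': {'c': 1}}}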
structured_config = ddict()
structured_config["router_bgp"]= {} #TODO
structured_config["static_routes"]= {} #TODO
structured_config["service_routing_protocols_model"]= "multi-agent"
structured_config["ip_routing"]= True
structured_config["vlan_internal_order"]["allocation"]= "ascending"
structured_config["vlan_internal_order"]["range"]["beginning"]= 1006
structured_config["vlan_internal_order"]["range"]["ending"]= 1199
structured_config["name_server"] #TODO
structured_config["spanning_tree"]["mode"] = "mstp"
structured_config["spanning_tree"]["mst_instances"]["0"]["priority"] = 4096
userscf= nb.extras.config_contexts.get(name='local-users').data
users= userscf["system"]["aaa"]["authentication"]["users"]
for user in users:
structured_config["local_users"][user]["privilege"]= users[user]['privilege']
structured_config["local_users"][user]["sha512_password"]= users[user]['password']
structured_config["local_users"][user]["role"]= users[user]['role']
structured_config["vrfs"] #TODO
structured_config["management_interfaces"] #TODO
structured_config["management_api_http"] #TODO
structured_config["ethernet_interfaces"] #TODO
structured_config["loopback_interfaces"] #TODO
structured_config["vlan_interfaces"] #TODO
structured_config["vxlan_interface"] #TODO
structured_config["prefix_lists"] #TODO
structured_config["route_maps"] #TODO
structured_config["router_bfd"] #TODO
structured_config["vlans"] #TODO
structured_config["ip_igmp_snooping"]["globally_enabled"]= True
structured_config["ip_virtual_router_mac_address"] = "00:00:00:00:00:01"
structured_config["virtual_source_nat_vrfs"] #TODO
output=yaml.dump(ddict2dict(structured_config), allow_unicode=True, default_flow_style=False)
print(output)
``` |
{
"source": "jmanteau/SPFlatten",
"score": 2
} |
#### File: jmanteau/SPFlatten/SPFlatten.py
```python
import re, dns.resolver
#-----------------------------------------------------
# SPFlattener - Because who needs limits??
# Requires: dnspython
# Usage: edit the "root_domain" variable below and run
#-----------------------------------------------------
# To-do:
# Confirm that SPF doesn't follow CNAMES (I don't think it does)
# Should we consider Sender ID? ie spf2.0 (probably not)
#---------------------------------
root_domain = "google.com"
#---------------------------------
spf_ip_list = []
spf_nonflat_mechanisms = []
def main():
global all_mechanism
all_mechanism = ""
flatten_spf(root_domain)
dedupe_spf_ip_list = list(set(spf_ip_list))
    flat_spf = "v=spf1"
for ip in dedupe_spf_ip_list:
if re.match(r'.*:.*', ip):
flat_spf += (" ip6:" + ip)
else:
flat_spf += (" ip4:" + ip)
for mechanism in spf_nonflat_mechanisms:
        flat_spf += (" " + mechanism)
flat_spf += all_mechanism
print "\nFlattened SPF:\n----------------------\n", flat_spf
# Recursively flatten the SPF record for the specified domain
def flatten_spf(domain):
print "--- Flattening:", domain, "---"
try:
txt_records = dns.resolver.query(domain, "TXT")
except dns.exception.DNSException:
print "No TXT records for:", domain
return
for record in txt_records:
print "TXT record for:", domain, ":", str(record)
fields = str(record)[1:-1].split(' ')
if re.match(r'v=spf1', fields[0]):
for field in fields:
parse_mechanism(field, domain)
# Parse the given mechanism, and dispatch it accordingly
def parse_mechanism(mechanism, domain):
    global all_mechanism
    if re.match(r'^a$', mechanism):
convert_domain_to_ipv4(domain)
elif re.match(r'^mx$', mechanism):
convert_mx_to_ipv4(domain)
elif re.match(r'^a:.*$', mechanism):
match = re.match(r'^a:(.*)$', mechanism)
convert_domain_to_ipv4(match.group(1))
elif re.match(r'^ip4:.*$', mechanism):
match = re.match(r'^ip4:(.*)$', mechanism)
print "IPv4 address found for", domain, ":", match.group(1)
spf_ip_list.append(match.group(1))
elif re.match(r'^ip6:.*$', mechanism):
match = re.match(r'^ip6:(.*)$', mechanism)
print "IPv6 address found for", domain, ":", match.group(1)
spf_ip_list.append(match.group(1))
elif re.match(r'^ptr.*$', mechanism):
print "PTR found for", domain, ":", mechanism
spf_nonflat_mechanisms.append(mechanism)
    elif re.match(r'^exists:.*$', mechanism):
print "Exists found for", domain, ":", mechanism
spf_nonflat_mechanisms.append(mechanism)
    elif re.match(r'^redirect:.*$', mechanism):
print "Redirect found for", domain, ":", mechanism
spf_nonflat_mechanisms.append(mechanism)
    elif re.match(r'^exp:.*$', mechanism):
print "EXP found for", domain, ":", mechanism
spf_nonflat_mechanisms.append(mechanism)
elif re.match(r'^.all$', mechanism):
if domain == root_domain:
match = re.match(r'^(.all)$', mechanism)
print "All found for", domain, ":", match.group(1)
all_mechanism = " " + str(match.group(1))
elif re.match(r'^include:.*$', mechanism):
match = re.match(r'^include:(.*)', mechanism)
flatten_spf(match.group(1)) # recursion
# Convert A/AAAA records to IPs and add them to the SPF master list
def convert_domain_to_ipv4(domain):
a_records = []
aaaa_records = []
try:
a_records = dns.resolver.query(domain, "A")
for ip in a_records:
print "A record for", domain, ":", str(ip)
spf_ip_list.append(str(ip))
except dns.exception.DNSException:
pass
try:
aaaa_records = dns.resolver.query(domain, "AAAA")
for ip in aaaa_records:
            print "AAAA record for", domain, ":", str(ip)
spf_ip_list.append(str(ip))
except dns.exception.DNSException:
pass
# Convert MX records to IPs and add them to the SPF master list
def convert_mx_to_ipv4(domain):
try:
mx_records = dns.resolver.query(domain, "MX")
except dns.exception.DNSException:
return
for record in mx_records:
mx = str(record).split(' ')
print "MX record found for ", domain, ": ", mx[1]
convert_domain_to_ipv4(mx[1])
if __name__ == "__main__": main()
``` |
{
"source": "jmanuel1/concat",
"score": 2
} |
#### File: concat/concat/astutils.py
```python
from typing import (
Union,
List,
Tuple,
Iterable,
Optional,
Sequence,
Iterator,
cast,
)
import ast
import concat.visitors
import concat.level0.parse
# Typedefs
WordsOrStatements = Sequence[
Union['concat.level0.parse.WordNode', 'concat.level0.parse.StatementNode']
]
Words = List['concat.level0.parse.WordNode']
Location = Tuple[int, int]
_TranspilerDict = concat.visitors.VisitorDict[
'concat.level0.parse.Node', ast.AST
]
# AST Manipulation utilities
def pop_stack(index: int = -1) -> ast.Call:
load = ast.Load()
stack = ast.Name(id='stack', ctx=load)
pop = ast.Attribute(value=stack, attr='pop', ctx=load)
pop_call = ast.Call(func=pop, args=[ast.Num(index)], keywords=[])
return pop_call
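# Note (added comment, not in the original source): pop_stack() builds the AST
# for the Python expression `stack.pop(index)` (default `stack.pop(-1)`), which
# the transpiler splices into generated code wherever a value must be popped.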
def to_transpiled_quotation(
words: Words, default_location: Tuple[int, int], visitors: _TranspilerDict
) -> ast.expr:
quote = concat.level0.parse.QuoteWordNode(
list(words), list(words)[0].location if words else default_location
)
py_quote = visitors['quote-word'].visit(quote)
return cast(ast.expr, py_quote)
def pack_expressions(expressions: Iterable[ast.expr]) -> ast.Subscript:
load = ast.Load()
subtuple = ast.Tuple(elts=[*expressions], ctx=load)
index = ast.Index(value=ast.Num(n=-1))
last = ast.Subscript(value=subtuple, slice=index, ctx=load)
return last
def to_python_decorator(
word: 'concat.level0.parse.WordNode', visitors: _TranspilerDict
) -> ast.Lambda:
push_func = cast(
ast.Expression, ast.parse('stack.append(func)', mode='eval')
).body
py_word = cast(ast.expr, visitors['word'].visit(word))
body = pack_expressions([push_func, py_word, pop_stack()])
func_arg = ast.arg('func', None)
arguments = ast.arguments(
args=[func_arg],
vararg=None,
kwonlyargs=[],
kwarg=None,
defaults=[],
kw_defaults=[],
)
decorator = ast.Lambda(args=arguments, body=body)
return decorator
def remove_leading_dots(relative_module: str) -> Optional[str]:
index = 0
for i, char in enumerate(relative_module):
if char != '.':
index = i
break
return relative_module[index:] or None
def count_leading_dots(relative_module: str) -> int:
count = 0
for char in relative_module:
if char != '.':
break
count += 1
return count
# TODO: I think this is overly complicated. We should just establish a simple
# calling convention, like:
# With the arguments (self, *args, **kwargs) to a magic function, self is
# pushed onto the stack, each of args is pushed onto the stack, then kwargs is
# pushed onto the stack. There will have to be various exceptions to this (like
# with __init__), but it will lead to a lot more consistency.
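# Illustrative sketch of that convention (not current behavior): under it,
# __setitem__(self, key, value) would simply push self, then key, then value,
# with no need for the per-method special cases below.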
def correct_magic_signature(statement: ast.stmt) -> ast.stmt:
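    # Gives each Python magic method generated from a Concat definition the
    # parameter list Python expects, pushes those parameters onto the Concat
    # stack at the start of the body, and, for methods that must return a
    # value, appends a final `return stack.pop()`.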
if isinstance(statement, ast.FunctionDef):
name = statement.name
if name == '__new__':
args = statement.args.args
args[:0] = [ast.arg('cls', None)]
push_cls = ast.parse('stack.append(cls)').body[0]
body = statement.body
body[:0] = [push_cls]
elif name == '__init__' or name == '__call__':
args = statement.args.args
args[:0] = [ast.arg('self', None)]
push_self = ast.parse('stack.append(self)').body[0]
body = statement.body
body[:0] = [push_self]
elif name in {
'__del__',
'__repr__',
'__str__',
'__bytes__',
'__hash__',
'__bool__',
'__dir__',
'__len__',
'__length_hint__',
'__aenter__',
'__anext__',
'__aiter__',
'__await__',
'__enter__',
'__ceil__',
'__floor__',
'__trunc__',
'__index__',
'__float__',
'__int__',
'__complex__',
'__invert__',
'__abs__',
'__pos__',
'__neg__',
'__reversed__',
'__iter__',
}:
statement.args.args = [ast.arg('self', None)]
push_self, pop_return = ast.parse(
'stack.append(self)\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_self]
body.append(pop_return)
elif name in {'__format__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('format_spec', None),
]
push_format_spec, push_self, pop_return = ast.parse(
'stack.append(format_spec)\n'
'stack.append(self)\n'
'return stack.pop()'
).body
body = statement.body
body[:0] = [push_format_spec, push_self]
body.append(pop_return)
elif name in {
'__lt__',
'__le__',
'__eq__',
'__ne__',
'__gt__',
'__ge__',
'__ior__',
'__ixor__',
'__iand__',
'__irshift__',
'__ilshift__',
'__imod__',
'__ifloordiv__',
'__itruediv__',
'__imatmul__',
'__imul__',
'__isub__',
'__iadd__',
'__ror__',
'__rxor__',
'__rand__',
'__rrshift__',
'__rlshift__',
'__rmod__',
'__rfloordiv__',
'__rtruediv__',
'__rmatmul__',
'__rmul__',
'__rsub__',
'__radd__',
'__rpow__',
'__or__',
'__xor__',
'__and__',
'__rshift__',
'__lshift__',
'__mod__',
'__floordiv__',
'__truediv__',
'__matmul__',
'__mul__',
'__sub__',
'__add__',
}:
statement.args.args = [
ast.arg('self', None),
ast.arg('other', None),
]
push_self, push_other, pop_return = ast.parse(
'stack.append(self)\n'
'stack.append(other)\n'
'return stack.pop()'
).body
body = statement.body
body[:0] = [push_self, push_other]
body.append(pop_return)
elif name in {'__getattr__', '__getattribute__', '__delattr__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('name', None),
]
push_name, push_self, pop_return = ast.parse(
'stack.append(name)\n'
'stack.append(self)\n'
'return stack.pop()'
).body
body = statement.body
body[:0] = [push_name, push_self]
body.append(pop_return)
elif name in {'__setattr__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('name', None),
ast.arg('value', None),
]
push_args, pop_return = ast.parse(
'stack += [value, name, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__get__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('instance', None),
ast.arg('owner', None),
]
push_args, pop_return = ast.parse(
'stack += [owner, instance, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__set__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('instance', None),
ast.arg('value', None),
]
push_args, pop_return = ast.parse(
'stack += [value, instance, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__delete__', '__instancecheck__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('instance', None),
]
push_args, pop_return = ast.parse(
'stack += [instance, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__init_subclass__'}:
statement.args.args = [ast.arg('cls', None)]
statement.args.kwarg = ast.arg('kwargs', None)
push_args, pop_return = ast.parse(
'stack += [kwargs, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__prepare__'}:
statement.args.args = [
ast.arg('cls', None),
ast.arg('name', None),
ast.arg('bases', None),
]
statement.args.kwarg = ast.arg('kwds', None)
push_args, pop_return = ast.parse(
'stack += [kwds, bases, name, cls]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__subclasscheck__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('subclass', None),
]
push_args, pop_return = ast.parse(
'stack += [subclass, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__getitem__', '__missing__', '__delitem__'}:
statement.args.args = [ast.arg('self', None), ast.arg('key', None)]
push_args, pop_return = ast.parse(
'stack += [key, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__aexit__', '__exit__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('exc_type', None),
ast.arg('exc_value', None),
ast.arg('traceback', None),
]
push_args, pop_return = ast.parse(
'stack += [traceback, exc_value, exc_type, self]\n'
'return stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__round__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('ndigits', None),
]
push_args, pop_return = ast.parse(
'stack += [ndigits, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__ipow__', '__pow__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('other', None),
ast.arg('modulo', None),
]
statement.args.defaults = [ast.Num(1)]
push_args, pop_return = ast.parse(
'stack += [self, other, modulo]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__contains__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('item', None),
]
push_args, pop_return = ast.parse(
'stack += [item, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
elif name in {'__setitem__'}:
statement.args.args = [
ast.arg('self', None),
ast.arg('key', None),
ast.arg('value', None),
]
push_args, pop_return = ast.parse(
'stack += [value, key, self]\nreturn stack.pop()'
).body
body = statement.body
body[:0] = [push_args]
body.append(pop_return)
return statement
def statementfy(node: Union[ast.expr, ast.stmt]) -> ast.stmt:
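    # Turns a bare expression into a statement by calling it as a Concat
    # function (with the stack and stash) and wrapping it in ast.Expr;
    # statements are returned unchanged.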
if isinstance(node, ast.expr):
call_node = call_concat_function(node)
return ast.Expr(value=call_node)
return node
def parse_py_qualified_name(name: str) -> Union[ast.Name, ast.Attribute]:
return cast(
Union[ast.Name, ast.Attribute],
cast(ast.Expression, ast.parse(name, mode='eval')).body,
)
def assert_all_nodes_have_locations(tree: ast.AST) -> None:
for node in ast.walk(tree):
if isinstance(node, (ast.expr, ast.stmt)):
assert hasattr(node, 'lineno')
assert hasattr(node, 'col_offset')
def flatten(list: List[Union['concat.level0.parse.WordNode', Words]]) -> Words:
flat_list: List[concat.level0.parse.WordNode] = []
for el in list:
if isinstance(el, concat.level0.parse.WordNode):
flat_list.append(el)
else:
flat_list.extend(el)
return flat_list
def call_concat_function(func: ast.expr) -> ast.Call:
load = ast.Load()
stack = ast.Name(id='stack', ctx=load)
stash = ast.Name(id='stash', ctx=load)
call_node = ast.Call(func=func, args=[stack, stash], keywords=[])
return call_node
def abstract(func: ast.expr) -> ast.Lambda:
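    # Wraps an expression in `lambda stack, stash: <expr>`, giving it the
    # calling convention of a Concat function.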
args = ast.arguments(
[ast.arg('stack', None), ast.arg('stash', None)],
None,
[],
[],
None,
[],
)
py_node = ast.Lambda(args, func)
return py_node
def assign_self_pushing_module_type_to_all_components(
qualified_name: str,
) -> Iterator[ast.Assign]:
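    # For a dotted name like 'a.b.c', yields one assignment per prefix
    # ('a', 'a.b', 'a.b.c') that sets its __class__ to
    # concat.level0.stdlib.importlib.Module, the self-pushing module type.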
qualified_name = qualified_name.strip()
components = tuple(qualified_name.split('.'))
if qualified_name.endswith('.__class__'):
components = components[:-1]
assert components
for i in range(1, len(components) + 1):
target = '.'.join(components[:i])
assert target
assignment = '{}.__class__ = concat.level0.stdlib.importlib.Module'.format(
target
)
yield ast.parse(assignment, mode='exec').body[0] # type: ignore
def append_to_stack(expr: ast.expr) -> ast.expr:
push_func = ast.Attribute(
ast.Name(id='stack', ctx=ast.Load()), 'append', ctx=ast.Load()
)
py_node = ast.Call(func=push_func, args=[expr], keywords=[])
return py_node
def get_explicit_positional_function_parameters(
fun: ast.FunctionDef,
) -> List[str]:
return [arg.arg for arg in fun.args.args]
def wrap_in_statement(statements: Iterable[ast.stmt]) -> ast.stmt:
    true = ast.NameConstant(True)
    return ast.If(test=true, body=list(statements), orelse=[])
```
#### File: concat/level0/execute.py
```python
import ast
import types
from typing import Dict, Optional, List, Callable
import concat.level0.stdlib.importlib
from concat.level0.stdlib.pyinterop import py_call
class ConcatRuntimeError(RuntimeError):
def __init__(self, stack: List[object], stash: List[object]) -> None:
super().__init__(stack, stash)
self._stack = stack
self._stash = stash
def __str__(self) -> str:
return 'Stack: {!r}, Stash: {!r}'.format(self._stack, self._stash)
class LoggableStack(List[object]):
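    # A list that can print itself after every append and pop, used to trace
    # the contents of the Concat stack and stash during execution.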
def __init__(self, name: str, should_log=False) -> None:
self._name = name
self._should_log = should_log
def append(self, val: object) -> None:
super().append(val)
self._should_log and print(self._name, ":: after push (.append):", self)
def pop(self, i: int = -1) -> object:
r = super().pop(i)
self._should_log and print(self._name, ":: after pop (.pop):", self)
return r
def _compile(filename: str, ast_: ast.Module) -> types.CodeType:
return compile(ast_, filename, 'exec')
def _run(
prog: types.CodeType,
globals: Optional[Dict[str, object]] = None,
locals: Optional[Dict[str, object]] = None
) -> None:
globals = {} if globals is None else globals
try:
# FIXME: Imports should be resolved from the location of the source
# file.
exec(prog, globals, globals if locals is None else locals)
except Exception as e:
# throw away all of the traceback outside the code
# traceback = e.__traceback__.tb_next
raise ConcatRuntimeError(globals['stack'], globals['stash']) from e
def _do_preamble(globals: Dict[str, object], should_log_stacks=False) -> None:
"""Add key-value pairs expected by Concat code to the passed-in mapping.
This mutates the mapping, but anything already in the mapping is preserved."""
globals.setdefault('concat', concat)
globals.setdefault('py_call', py_call)
globals.setdefault('stack', LoggableStack('stack', should_log_stacks))
globals.setdefault('stash', LoggableStack('stash', should_log_stacks))
def push(val: object) -> Callable[[List[object], List[object]], None]:
def push_func(stack: List[object], _: List[object]):
stack.append(val)
return push_func
globals.setdefault('push', push)
def execute(
filename: str,
ast: ast.Module,
globals: Dict[str, object],
interactive=False,
locals: Optional[Dict[str, object]] = None,
should_log_stacks=False,
) -> None:
_do_preamble(globals, should_log_stacks)
_run(_compile(filename, ast), globals, locals)
```
#### File: level1/stdlib/compositional.py
```python
from concat.level0.stdlib.types import Quotation
def curry(stack, stash):
"""value $fun -- $(value fun)"""
fun, value = (stack.pop() for _ in range(2))
stack.append(Quotation([lambda s, _: s.append(value), fun]))
```
#### File: stdlib/pyinterop/coroutine.py
```python
import sys
import types
import concat.level0.stdlib.importlib
from typing import List, Callable, cast, Coroutine, Type, Optional
# make this module callable
sys.modules[__name__].__class__ = concat.level0.stdlib.importlib.Module
def send(stack: List[object], stash: List[object]) -> None:
"""value coroutine -- coroutine.send(value)"""
stack.append(cast(Coroutine, stack.pop()).send(stack.pop()))
def throw(stack: List[object], stash: List[object]) -> None:
"""traceback value type coroutine -- coroutine.throw(type, value, traceback)"""
coroutine = stack.pop()
type = cast(Type[BaseException], stack.pop())
value, traceback = (stack.pop() for _ in range(2))
stack.append(cast(Coroutine, coroutine).throw(
type, None if value is None else type(value), cast(Optional[types.TracebackType], traceback)))
def close(stack: List[object], stash: List[object]) -> None:
"""coroutine --"""
coroutine = cast(Coroutine, stack.pop())
coroutine.close()
```
#### File: stdlib/pyinterop/instance.py
```python
import sys
import types
import concat.level0.stdlib.importlib
from typing import List, Callable, cast
# make this module callable
sys.modules[__name__].__class__ = concat.level0.stdlib.importlib.Module
def dict(stack: List[object], stash: List[object]) -> None:
"""$inst -- $inst$.__dict__"""
stack.append(stack.pop().__dict__)
def cls(stack: List[object], stash: List[object]) -> None:
"""$inst -- $inst$.__class__"""
stack.append(stack.pop().__class__)
```
#### File: level2/stdlib/pyinterop.py
```python
from typing import List, Callable, cast
from concat.level1.typecheck import ForAll, StackEffect, PrimitiveTypes, SequenceVariable
_rest_var = SequenceVariable()
_rest_var_2 = SequenceVariable()
_rest_var_3 = SequenceVariable()
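# Declared type of to_py_function: for any stacks ...r, ...s, ...t, it takes a
# Concat function with effect (...s -- ...t) on top of ...r and leaves a
# py_function in its place.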
globals()['@@types'] = {
'to_py_function': ForAll(
[_rest_var, _rest_var_2, _rest_var_3],
StackEffect([
_rest_var,
StackEffect([_rest_var_2], [_rest_var_3])
], [
_rest_var,
PrimitiveTypes.py_function
])
)
}
def to_py_function(stack: List[object], stash: List[object]) -> None:
func = cast(Callable[[List[object], List[object]], None], stack.pop())
def py_func(*args: object) -> object:
nonlocal stack
stack += [*args]
func(stack, stash)
return stack.pop()
stack.append(py_func)
```
#### File: concat/concat/lex.py
```python
import concat.level0.lex
import concat.level2.lex
from typing import List
def tokenize(code: str) -> List[concat.level0.lex.Token]:
lexer = concat.level2.lex.Lexer()
lexer.input(code)
tokens = []
while True:
token = lexer.token()
if token is None:
break
tokens.append(token)
return tokens
```
#### File: tests/level0/test_execute.py
```python
import concat.level0.execute
import unittest
import ast
class TestExecute(unittest.TestCase):
def setUp(self) -> None:
pass
def test_execute_function(self) -> None:
module = ast.Module(body=[])
concat.level0.execute.execute('<test>', module, {})
# we passed if we get here
```
#### File: tests/level0/test_lex.py
```python
import concat.level0.lex as lex
from concat.level0.lex import TokenTuple
import unittest
from typing import Tuple, Dict, Sequence
class TestSmallExamples(unittest.TestCase):
examples: Dict[str, Sequence[TokenTuple]] = {
'$() $(0) bool\n': ( # newline is important
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DOLLARSIGN', '$', (1, 0), (1, 1)),
('LPAR', '(', (1, 1), (1, 2)),
('RPAR', ')', (1, 2), (1, 3)),
('DOLLARSIGN', '$', (1, 4), (1, 5)),
('LPAR', '(', (1, 5), (1, 6)),
('NUMBER', '0', (1, 6), (1, 7)),
('RPAR', ')', (1, 7), (1, 8)),
('NAME', 'bool', (1, 9), (1, 13)),
('NEWLINE', '\n', (1, 13), (1, 14)),
('ENDMARKER', '', (2, 0), (2, 0))
),
"$() $('This is a string') len\n": (
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DOLLARSIGN', '$', (1, 0), (1, 1)),
('LPAR', '(', (1, 1), (1, 2)),
('RPAR', ')', (1, 2), (1, 3)),
('DOLLARSIGN', '$', (1, 4), (1, 5)),
('LPAR', '(', (1, 5), (1, 6)),
('STRING', "'This is a string'", (1, 6), (1, 24)),
('RPAR', ')', (1, 24), (1, 25)),
('NAME', 'len', (1, 26), (1, 29)),
('NEWLINE', '\n', (1, 29), (1, 30)),
('ENDMARKER', '', (2, 0), (2, 0))
),
"$() $('Strings' 'interpolated') '{} can be {}'.format\n": (
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DOLLARSIGN', '$', (1, 0), (1, 1)),
('LPAR', '(', (1, 1), (1, 2)),
('RPAR', ')', (1, 2), (1, 3)),
('DOLLARSIGN', '$', (1, 4), (1, 5)),
('LPAR', '(', (1, 5), (1, 6)),
('STRING', "'Strings'", (1, 6), (1, 15)),
('STRING', "'interpolated'", (1, 16), (1, 30)),
('RPAR', ')', (1, 30), (1, 31)),
('STRING', "'{} can be {}'", (1, 32), (1, 46)),
('DOT', '.', (1, 46), (1, 47)),
('NAME', 'format', (1, 47), (1, 53)),
('NEWLINE', '\n', (1, 53), (1, 54)),
('ENDMARKER', '', (2, 0), (2, 0))
)
}
def test_examples(self) -> None:
for example in self.examples:
with self.subTest(example=example):
tokens = []
lex.lexer.input(example)
while True:
token = lex.lexer.token()
if token is None:
break
tokens.append(token)
expectationPairs = zip(tokens, self.examples[example])
self.assertTrue(
all(map(self._matches_token, expectationPairs)))
def _matches_token(self, pair: Tuple[lex.Token, TokenTuple]) -> bool:
token, tokTuple = pair
return (
token.type == tokTuple[0]
and token.value == tokTuple[1]
and token.start == tokTuple[2]
and token.end == tokTuple[3]
)
```
#### File: tests/level1/small_example_programs.py
```python
from concat.level0.lex import to_tokens
# The newlines in each example are important.
examples = {
'None\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NONE', 'None', (1, 0), (1, 4)),
('NEWLINE', '\n', (1, 4), (1, 5)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'NotImplemented\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NOTIMPL', 'NotImplemented', (1, 0), (1, 14)),
('NEWLINE', '\n', (1, 14), (1, 15)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'... Ellipsis\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('ELLIPSIS', '...', (1, 0), (1, 3)),
('ELLIPSIS', 'Ellipsis', (1, 4), (1, 12)),
('NEWLINE', '\n', (1, 12), (1, 13)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'[9]\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LSQB', '[', (1, 0), (1, 1)),
('NUMBER', '9', (1, 1), (1, 2)),
('RSQB', ']', (1, 2), (1, 3)),
('NEWLINE', '\n', (1, 3), (1, 4)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'[7:8]\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LSQB', '[', (1, 0), (1, 1)),
('NUMBER', '7', (1, 1), (1, 2)),
('COLON', ':', (1, 2), (1, 3)),
('NUMBER', '8', (1, 3), (1, 4)),
('RSQB', ']', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'[::0 1 -]\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LSQB', '[', (1, 0), (1, 1)),
('COLON', ':', (1, 1), (1, 2)),
('COLON', ':', (1, 2), (1, 3)),
('NUMBER', '0', (1, 3), (1, 4)),
('NUMBER', '1', (1, 5), (1, 6)),
('MINUS', '-', (1, 7), (1, 8)),
('RSQB', ']', (1, 8), (1, 9)),
('NEWLINE', '\n', (1, 9), (1, 10)),
('ENDMARKER', '', (2, 0), (2, 0))
),
"b'bytes'\n": to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('BYTES', "b'bytes'", (1, 0), (1, 8)),
('NEWLINE', '\n', (1, 8), (1, 9)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'(5,)\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LPAR', '(', (1, 0), (1, 1)),
('NUMBER', '5', (1, 1), (1, 2)),
('COMMA', ',', (1, 2), (1, 3)),
('RPAR', ')', (1, 3), (1, 4)),
('NEWLINE', '\n', (1, 4), (1, 5)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'[,]\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LSQB', '[', (1, 0), (1, 1)),
('COMMA', ',', (1, 1), (1, 2)),
('RSQB', ']', (1, 2), (1, 3)),
('NEWLINE', '\n', (1, 3), (1, 4)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'(1,2,3)\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LPAR', '(', (1, 0), (1, 1)),
('NUMBER', '1', (1, 1), (1, 2)),
('COMMA', ',', (1, 2), (1, 3)),
('NUMBER', '2', (1, 3), (1, 4)),
('COMMA', ',', (1, 4), (1, 5)),
('NUMBER', '3', (1, 5), (1, 6)),
('RPAR', ')', (1, 6), (1, 7)),
('NEWLINE', '\n', (1, 7), (1, 8)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'(1,2,)\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LPAR', '(', (1, 0), (1, 1)),
('NUMBER', '1', (1, 1), (1, 2)),
('COMMA', ',', (1, 2), (1, 3)),
('NUMBER', '2', (1, 3), (1, 4)),
('COMMA', ',', (1, 4), (1, 5)),
('RPAR', ')', (1, 5), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'del .attr\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DEL', 'del', (1, 0), (1, 3)),
('DOT', '.', (1, 4), (1, 5)),
('NAME', 'attr', (1, 5), (1, 9)),
('NEWLINE', '\n', (1, 9), (1, 10)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'{1,2,3,}\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LBRACE', '{', (1, 0), (1, 1)),
('NUMBER', '1', (1, 1), (1, 2)),
('COMMA', ',', (1, 2), (1, 3)),
('NUMBER', '2', (1, 3), (1, 4)),
('COMMA', ',', (1, 4), (1, 5)),
('NUMBER', '3', (1, 5), (1, 6)),
('COMMA', ',', (1, 6), (1, 7)),
('RBRACE', '}', (1, 7), (1, 8)),
('NEWLINE', '\n', (1, 8), (1, 9)),
('ENDMARKER', '', (2, 0), (2, 0))
),
"{'a':1,'b':2}\n": to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('LBRACE', '{', (1, 0), (1, 1)),
('STRING', "'a'", (1, 1), (1, 4)),
('COLON', ':', (1, 4), (1, 5)),
('NUMBER', '1', (1, 5), (1, 6)),
('COMMA', ',', (1, 6), (1, 7)),
('STRING', "'b'", (1, 7), (1, 10)),
('COLON', ':', (1, 10), (1, 11)),
('NUMBER', '2', (1, 11), (1, 12)),
('RBRACE', '}', (1, 12), (1, 13)),
('NEWLINE', '\n', (1, 13), (1, 14)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'word yield\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NAME', 'word', (1, 0), (1, 4)),
('YIELD', 'yield', (1, 5), (1, 10)),
('NEWLINE', '\n', (1, 10), (1, 11)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'async def fun: 5\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('ASYNC', 'async', (1, 0), (1, 5)),
('DEF', 'def', (1, 6), (1, 9)),
('NAME', 'fun', (1, 10), (1, 13)),
('COLON', ':', (1, 13), (1, 14)),
('NUMBER', '5', (1, 15), (1, 16)),
('NEWLINE', '\n', (1, 16), (1, 17)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'word await\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NAME', 'word', (1, 0), (1, 4)),
('AWAIT', 'await', (1, 5), (1, 10)),
('NEWLINE', '\n', (1, 10), (1, 11)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'import a.submodule\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('IMPORT', 'import', (1, 0), (1, 6)),
('NAME', 'a', (1, 7), (1, 8)),
('DOT', '.', (1, 8), (1, 9)),
('NAME', 'submodule', (1, 9), (1, 18)),
('NEWLINE', '\n', (1, 18), (1, 19)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'import a as b\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('IMPORT', 'import', (1, 0), (1, 6)),
('NAME', 'a', (1, 7), (1, 8)),
('AS', 'as', (1, 9), (1, 11)),
('NAME', 'b', (1, 12), (1, 13)),
('NEWLINE', '\n', (1, 13), (1, 14)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'from .a import b\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('FROM', 'from', (1, 0), (1, 4)),
('DOT', '.', (1, 5), (1, 6)),
('NAME', 'a', (1, 6), (1, 7)),
('IMPORT', 'import', (1, 8), (1, 14)),
('NAME', 'b', (1, 15), (1, 16)),
('NEWLINE', '\n', (1, 16), (1, 17)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'from .a import b as c\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('FROM', 'from', (1, 0), (1, 4)),
('DOT', '.', (1, 5), (1, 6)),
('NAME', 'a', (1, 6), (1, 7)),
('IMPORT', 'import', (1, 8), (1, 14)),
('NAME', 'b', (1, 15), (1, 16)),
('AS', 'as', (1, 17), (1, 19)),
('NAME', 'c', (1, 20), (1, 21)),
('NEWLINE', '\n', (1, 21), (1, 22)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'from a import *\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('FROM', 'from', (1, 0), (1, 4)),
('NAME', 'a', (1, 5), (1, 6)),
('IMPORT', 'import', (1, 7), (1, 13)),
('STAR', '*', (1, 14), (1, 15)),
('NEWLINE', '\n', (1, 15), (1, 16)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'class A: pass\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('CLASS', 'class', (1, 0), (1, 5)),
('NAME', 'A', (1, 6), (1, 7)),
('COLON', ':', (1, 7), (1, 8)),
('NAME', 'pass', (1, 9), (1, 13)),
('NEWLINE', '\n', (1, 13), (1, 14)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'class A @decorator: pass\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('CLASS', 'class', (1, 0), (1, 5)),
('NAME', 'A', (1, 6), (1, 7)),
('AT', '@', (1, 8), (1, 9)),
('NAME', 'decorator', (1, 9), (1, 18)),
('COLON', ':', (1, 18), (1, 19)),
('NAME', 'pass', (1, 20), (1, 24)),
('NEWLINE', '\n', (1, 24), (1, 25)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'class A($B,): pass\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('CLASS', 'class', (1, 0), (1, 5)),
('NAME', 'A', (1, 6), (1, 7)),
('LPAR', '(', (1, 7), (1, 8)),
('DOLLARSIGN', '$', (1, 8), (1, 9)),
('NAME', 'B', (1, 9), (1, 10)),
('COMMA', ',', (1, 10), (1, 11)),
('RPAR', ')', (1, 11), (1, 12)),
('COLON', ':', (1, 12), (1, 13)),
('NAME', 'pass', (1, 14), (1, 18)),
('NEWLINE', '\n', (1, 18), (1, 19)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'def test: pass\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DEF', 'def', (1, 0), (1, 3)),
('NAME', 'test', (1, 4), (1, 8)),
('COLON', ':', (1, 8), (1, 9)),
('NAME', 'pass', (1, 10), (1, 14)),
('NEWLINE', '\n', (1, 14), (1, 15)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'class A metaclass=$M: pass\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('CLASS', 'class', (1, 0), (1, 5)),
('NAME', 'A', (1, 6), (1, 7)),
('NAME', 'metaclass', (1, 8), (1, 17)),
('EQUAL', '=', (1, 17), (1, 18)),
('DOLLARSIGN', '$', (1, 18), (1, 19)),
('NAME', 'M', (1, 19), (1, 20)),
('COLON', ':', (1, 20), (1, 21)),
('NAME', 'pass', (1, 22), (1, 26)),
('NEWLINE', '\n', (1, 26), (1, 27)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'2 4 **\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '2', (1, 0), (1, 1)),
('NUMBER', '4', (1, 2), (1, 3)),
('DOUBLESTAR', '**', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'0 ~\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '0', (1, 0), (1, 1)),
('TILDE', '~', (1, 2), (1, 3)),
('NEWLINE', '\n', (1, 3), (1, 4)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'6 9 *\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '6', (1, 0), (1, 1)),
('NUMBER', '9', (1, 2), (1, 3)),
('STAR', '*', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'A B @\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NAME', 'A', (1, 0), (1, 1)),
('NAME', 'B', (1, 2), (1, 3)),
('AT', '@', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 //\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('DOUBLESLASH', '//', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 /\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('SLASH', '/', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 %\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('PERCENT', '%', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 +\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('PLUS', '+', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 -\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('MINUS', '-', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 <<\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('LEFTSHIFT', '<<', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 >>\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('RIGHTSHIFT', '>>', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 &\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('AMPER', '&', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 ^\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('CIRCUMFLEX', '^', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 |\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('VBAR', '|', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 <\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('LESS', '<', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 >\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('GREATER', '>', (1, 4), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 ==\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('EQEQUAL', '==', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 >=\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('GREATEREQUAL', '>=', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 <=\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('LESSEQUAL', '<=', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 !=\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('NOTEQUAL', '!=', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 2 is\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NUMBER', '2', (1, 2), (1, 3)),
('IS', 'is', (1, 4), (1, 6)),
('NEWLINE', '\n', (1, 6), (1, 7)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 $() in\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('DOLLARSIGN', '$', (1, 2), (1, 3)),
('LPAR', '(', (1, 3), (1, 4)),
('RPAR', ')', (1, 4), (1, 5)),
('IN', 'in', (1, 6), (1, 8)),
('NEWLINE', '\n', (1, 8), (1, 9)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 $() or\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('DOLLARSIGN', '$', (1, 2), (1, 3)),
('LPAR', '(', (1, 3), (1, 4)),
('RPAR', ')', (1, 4), (1, 5)),
('OR', 'or', (1, 6), (1, 8)),
('NEWLINE', '\n', (1, 8), (1, 9)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 $() and\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('DOLLARSIGN', '$', (1, 2), (1, 3)),
('LPAR', '(', (1, 3), (1, 4)),
('RPAR', ')', (1, 4), (1, 5)),
('AND', 'and', (1, 6), (1, 9)),
('NEWLINE', '\n', (1, 9), (1, 10)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'1 not\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NUMBER', '1', (1, 0), (1, 1)),
('NOT', 'not', (1, 2), (1, 5)),
('NEWLINE', '\n', (1, 5), (1, 6)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'True assert\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NAME', 'True', (1, 0), (1, 4)),
('ASSERT', 'assert', (1, 5), (1, 11)),
('NEWLINE', '\n', (1, 11), (1, 12)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'None AnException raise\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('NONE', 'None', (1, 0), (1, 4)),
('NAME', 'AnException', (1, 5), (1, 16)),
('RAISE', 'raise', (1, 17), (1, 22)),
('NEWLINE', '\n', (1, 22), (1, 23)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'$() $() try\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DOLLARSIGN', '$', (1, 0), (1, 1)),
('LPAR', '(', (1, 1), (1, 2)),
('RPAR', ')', (1, 2), (1, 3)),
('DOLLARSIGN', '$', (1, 4), (1, 5)),
('LPAR', '(', (1, 5), (1, 6)),
('RPAR', ')', (1, 6), (1, 7)),
('TRY', 'try', (1, 8), (1, 11)),
('NEWLINE', '\n', (1, 11), (1, 12)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'$() ctxmgr with\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DOLLARSIGN', '$', (1, 0), (1, 1)),
('LPAR', '(', (1, 1), (1, 2)),
('RPAR', ')', (1, 2), (1, 3)),
('NAME', 'ctxmgr', (1, 4), (1, 10)),
('WITH', 'with', (1, 11), (1, 15)),
('NEWLINE', '\n', (1, 15), (1, 16)),
('ENDMARKER', '', (2, 0), (2, 0))
),
'def f(a b -- c): ()\n': to_tokens(
('ENCODING', 'utf-8', (0, 0), (0, 0)),
('DEF', 'def', (1, 0), (1, 3)),
('NAME', 'f', (1, 4), (1, 5)),
('LPAR', '(', (1, 5), (1, 6)),
('NAME', 'a', (1, 6), (1, 7)),
('NAME', 'b', (1, 8), (1, 9)),
('MINUS', '-', (1, 10), (1, 11)),
('MINUS', '-', (1, 11), (1, 12)),
('NAME', 'c', (1, 13), (1, 14)),
('RPAR', ')', (1, 14), (1, 15)),
('COLON', ':', (1, 15), (1, 16)),
('LPAR', '(', (1, 17), (1, 18)),
('RPAR', ')', (1, 18), (1, 19)),
('NEWLINE', '\n', (1, 19), (1, 20)),
('ENDMARKER', '', (2, 0), (2, 0))
)
}
```
#### File: level1/stdlib/test_repl.py
```python
import unittest
import io
import sys
import contextlib
import concat.level1.parse
import concat.level1.typecheck
from concat.level1.typecheck.types import SequenceVariable, StackEffect
import concat.level1.stdlib.types
import concat.level1.stdlib.repl
from typing import TextIO, Iterator
@contextlib.contextmanager
def replace_stdin(input_stream: TextIO) -> Iterator[None]:
# don't use sys.__stdin__ because sys.stdin might not be the original one
original_stdin = sys.stdin
sys.stdin = input_stream
try:
yield
finally:
sys.stdin = original_stdin
class TestREPLFunctions(unittest.TestCase):
def test_read_quot(self):
stack = []
seq_var = SequenceVariable()
# Like in Factor, read_quot will search its caller's scope for objects.
some, words, here = object(), object(), object()
with replace_stdin(io.StringIO('some words here')):
concat.level1.stdlib.repl.read_quot(
stack,
[],
extra_env=concat.level1.typecheck.Environment(
{
'some': StackEffect([seq_var], []),
'words': StackEffect([], []),
'here': StackEffect([], []),
}
),
)
self.assertEqual(
stack,
[concat.level1.stdlib.types.Quotation([some, words, here])],
msg='read_quot has incorrect stack effect',
)
def test_repl(self):
with replace_stdin(io.StringIO('[,] [,] $input py_call\nhi there')):
concat.level1.stdlib.repl.repl([], [])
self.assertEqual(
sys.stdin.read(), '', msg='repl did not consume all input'
)
def test_catch_parse_errors(self):
with replace_stdin(io.StringIO('drg nytu y,i.')):
try:
concat.level1.stdlib.repl.repl([], [])
except concat.level1.parse.ParseError:
self.fail('repl must recover from parser failures')
```
#### File: tests/level1/test_lex.py
```python
import concat.level1.lex as lex
from concat.tests.level1.small_example_programs import examples
import unittest
class TestSmallExamples(unittest.TestCase):
def test_examples(self) -> None:
for example in examples:
with self.subTest(example=example):
tokens = []
lex.lexer.input(example)
while True:
token = lex.lexer.token()
if token is None:
break
tokens.append(token)
message = '{!r} is not lexed correctly'.format(example)
self.assertEqual(tokens, [*examples[example]], message)
```
#### File: tests/level1/test_parse.py
```python
import concat.level0.parse
import concat.level1.parse
from concat.tests.level1.small_example_programs import examples
import unittest
import parsy
class TestSmallExamples(unittest.TestCase):
"""Test that parser recognizes small example programs (token sequences)."""
def test_examples(self) -> None:
for example in examples:
with self.subTest(example=example):
tokens = examples[example]
parsers = concat.level0.parse.ParserDict()
parsers.extend_with(concat.level0.parse.level_0_extension)
parsers.extend_with(concat.level1.parse.level_1_extension)
# We place a substitute stack effect parser in the dictionary
parsers['stack-effect-type'] = parsers.token('NAME').many() >> parsers.token(
'MINUS').many() >> parsers.token('NAME').many()
# for example programs, we only test acceptance
try:
parsers.parse(tuple(tokens))
except parsy.ParseError:
message = '{} was not accepted by the parser'.format(
repr(example))
self.fail(msg=message)
```
#### File: tests/level1/test_transpile.py
```python
import concat.visitors
from concat.astutils import get_explicit_positional_function_parameters
from concat.level0.lex import Token
import concat.level0.parse
import concat.level0.transpile
import concat.level1.parse
import concat.level1.transpile
import unittest
import ast
from typing import Iterable, Iterator, List, Sequence, Type, cast
import astunparse # type: ignore
class TestSubVisitors(unittest.TestCase):
def setUp(self) -> None:
self.__visitors = concat.visitors.VisitorDict[
concat.level0.parse.Node, ast.AST
]()
self.__visitors.extend_with(concat.level0.transpile.level_0_extension)
self.__visitors.extend_with(concat.level1.transpile.level_1_extension)
def _test_visitor(
self,
node: concat.level0.parse.Node,
visitor: str,
py_node_type: Type[ast.AST],
) -> ast.AST:
try:
py_node = self.__visitors[visitor].visit(node)
except concat.visitors.VisitFailureException:
message_template = '{} was not accepted by the {} visitor'
message = message_template.format(node, visitor)
self.fail(msg=message)
message = 'Python node is not a {}'.format(py_node_type.__qualname__)
self.assertIsInstance(py_node, py_node_type, msg=message)
return py_node
def _test_visitors(
self,
node: concat.level0.parse.Node,
visitors: Iterable[str],
py_node_type: Type[ast.AST],
) -> Iterator[ast.AST]:
for visitor in visitors:
yield self._test_visitor(node, visitor, py_node_type)
def _test_visitor_basic(
self, node: concat.level0.parse.Node, visitor: str
) -> ast.AST:
return self._test_visitor(node, visitor, ast.Call)
def test_none_word_visitor(self) -> None:
"""Tests that none words are transpiled to calls which contain None."""
none = Token()
none.start = (0, 0)
node = concat.level1.parse.NoneWordNode(none)
py_node = self._test_visitor_basic(node, 'none-word')
value = cast(ast.NameConstant, cast(ast.Call, py_node).args[0]).value
self.assertIs(
value, None, msg='Python None node does not contain `None`'
)
def test_not_impl_word_visitor(self) -> None:
"""Not-impl words are transpiled to calls containing NotImplemented."""
not_impl = Token()
not_impl.start = (0, 0)
node = concat.level1.parse.NotImplWordNode(not_impl)
py_node = self._test_visitor_basic(node, 'not-impl-word')
identifier = cast(ast.Name, cast(ast.Call, py_node).args[0]).id
message = 'Python Name node does not contain "NotImplemented"'
self.assertEqual(identifier, 'NotImplemented', msg=message)
def test_ellipsis_word_visitor(self) -> None:
"""Ellipsis words are transpiled to calls which contain '...'."""
ellipsis = Token()
ellipsis.start = (0, 0)
node = concat.level1.parse.EllipsisWordNode(ellipsis)
py_node = self._test_visitor_basic(node, 'ellipsis-word')
message = 'The Python node within the call is not an Ellipsis'
self.assertIsInstance(
cast(ast.Call, py_node).args[0], ast.Ellipsis, msg=message
)
def test_slice_word_visitor_with_step(self) -> None:
two_token = concat.level0.lex.Token()
two_token.type, two_token.value = 'NUMBER', '2'
two = concat.level0.parse.NumberWordNode(two_token)
node = concat.level1.parse.SliceWordNode(([], [], [two]))
py_node = self._test_visitor(node, 'slice-word', ast.expr)
self.assertIn(
'2',
astunparse.unparse(py_node),
msg='Python node does not contain 2',
)
def test_del_statement_visitor(self) -> None:
"""Concat del statements are transpiled to Python del statements."""
name_token = concat.level0.lex.Token()
name_token.value, name_token.start = 'a', (0, 0)
name = concat.level0.parse.NameWordNode(name_token)
node = concat.level1.parse.DelStatementNode([name])
self._test_visitors(node, {'del-statement', 'statement'}, ast.Delete)
def test_async_funcdef_statement_visitor(self) -> None:
"""Async function definitions are transpiled to the same kind of Python statement."""
name_token = concat.level0.lex.Token()
name_token.value, name_token.start = 'a', (0, 0)
node = concat.level1.parse.AsyncFuncdefStatementNode(
name_token, [], [], [], (0, 0)
)
visitors = {'async-funcdef-statement', 'statement'}
self._test_visitors(node, visitors, ast.AsyncFunctionDef)
def test_funcdef_statement_visitor(self) -> None:
"""Function definitions are transpiled to the same kind of Python statement."""
name_token = concat.level0.lex.Token()
name_token.value, name_token.start = 'a', (0, 0)
node = concat.level1.parse.FuncdefStatementNode(
name_token, [], [], [], (0, 0)
)
self._test_visitors(
node, {'funcdef-statement', 'statement'}, ast.FunctionDef
)
def test_import_statement_visitor_with_as(self) -> None:
"""import ... as ... statements are transpiled to the same kind of Python statement.
The as-clause will be present in the resulting Python AST."""
node = concat.level1.parse.ImportStatementNode('a.submodule', 'b')
for py_node in self._test_visitors(
node, {'import-statement', 'statement'}, ast.stmt
):
self.assertIn(
'as b',
astunparse.unparse(py_node),
msg='as-part was not transpiled',
)
def test_import_statement_visitor_with_from(self) -> None:
node = concat.level1.parse.FromImportStatementNode('a.submodule', 'b')
for py_node in self._test_visitors(
node, {'import-statement', 'statement'}, ast.stmt
):
self.assertIn(
'from',
astunparse.unparse(py_node),
msg='was not transpiled as from-import',
)
def test_import_statement_visitor_with_from_and_as(self) -> None:
node = concat.level1.parse.FromImportStatementNode(
'a.submodule', 'b', 'c'
)
for py_node in self._test_visitors(
node, {'import-statement', 'statement'}, ast.stmt
):
self.assertIn(
'from',
astunparse.unparse(py_node),
msg='was not transpiled as from-import',
)
self.assertIn(
'as c',
astunparse.unparse(py_node),
msg='as-part was not transpiled',
)
def test_import_statement_visitor_with_from_and_star(self) -> None:
node = concat.level1.parse.FromImportStarStatementNode('a')
for py_node in self._test_visitors(
node, {'import-statement', 'statement'}, ast.stmt
):
self.assertIn(
'from',
astunparse.unparse(py_node),
msg='was not transpiled as from-import',
)
self.assertIn(
'*',
astunparse.unparse(py_node),
msg='star-part was not transpiled',
)
def test_classdef_statement_visitor(self) -> None:
node = concat.level1.parse.ClassdefStatementNode('A', [], (0, 0))
self._test_visitors(
node, {'classdef-statement', 'statement'}, ast.ClassDef
)
def test_classdef_statement_visitor_with_decorators(self) -> None:
name = Token()
name.start, name.value = (0, 0), 'decorator'
decorator = concat.level0.parse.NameWordNode(name)
node = concat.level1.parse.ClassdefStatementNode(
'A', [], (0, 0), [decorator]
)
for py_node in self._test_visitors(
node, {'classdef-statement', 'statement'}, ast.ClassDef
):
self.assertIn(
'@',
astunparse.unparse(py_node),
msg='decorator was not transpiled',
)
def test_classdef_statement_visitor_with_bases(self) -> None:
name = Token()
name.start, name.value = (0, 0), 'base'
base = concat.level0.parse.NameWordNode(name)
node = concat.level1.parse.ClassdefStatementNode(
'A', [], (0, 0), [], [[base]]
)
for py_node in self._test_visitors(
node, {'classdef-statement', 'statement'}, ast.ClassDef
):
self.assertIn(
'(',
astunparse.unparse(py_node),
msg='bases were not transpiled',
)
self.assertIn(
'base',
astunparse.unparse(py_node),
msg='bases were not transpiled',
)
def test_classdef_statement_visitor_with_keyword_args(self) -> None:
name = Token()
name.start, name.value = (0, 0), 'meta'
word = concat.level0.parse.NameWordNode(name)
node = concat.level1.parse.ClassdefStatementNode(
'A', [], (0, 0), [], [], [('metaclass', word)]
)
for py_node in self._test_visitors(
node, {'classdef-statement', 'statement'}, ast.ClassDef
):
self.assertIn(
'(',
astunparse.unparse(py_node),
msg='keyword arguments were not transpiled',
)
self.assertIn(
'metaclass=',
astunparse.unparse(py_node),
msg='keyword arguments were not transpiled',
)
def test_subtract_word(self) -> None:
"""Tests that subtract words are successfuly transpiled."""
minus = Token('MINUS', '-')
word = concat.level1.operators.SubtractWordNode(minus)
for py_node in self._test_visitors(
word, {'word', 'operator-word', 'subtract-word'}, ast.expr
):
self.assertIn(
'-', astunparse.unparse(py_node), msg='no subtraction operator'
)
class TestMagicMethodTranspilation(unittest.TestCase):
"""Test that magic methods are transformed into what Python expects.
Note that we don't transform module-level __getattr__ and __dict__.
TODO: handle __(mro_entries, class_getitem)__ since those are Python 3.7
features.
    TODO: handle __set_name__ since that was added in 3.6.
    Special names that aren't methods, like __slots__, aren't accounted for. We
don't even have assignment!"""
def setUp(self) -> None:
self.__visitors = concat.visitors.VisitorDict[
concat.level0.parse.Node, ast.AST
]()
self.__visitors.extend_with(concat.level0.transpile.level_0_extension)
self.__visitors.extend_with(concat.level1.transpile.level_1_extension)
def _make_magic_py_method_from_name(
self, method_name: str
) -> ast.FunctionDef:
name = Token()
name.start, name.value = (0, 0), '__{}__'.format(method_name)
definition = concat.level1.parse.FuncdefStatementNode(
name, [], None, [], (0, 0)
)
node = concat.level1.parse.ClassdefStatementNode(
'A', [definition], (0, 0), [], []
)
py_node = cast(
ast.ClassDef, self.__visitors['classdef-statement'].visit(node)
)
return cast(ast.FunctionDef, py_node.body[0])
def _assert_explicit_positional_parameters_equal(
self, fun: ast.FunctionDef, params: List[str]
) -> None:
fun_params = get_explicit_positional_function_parameters(fun)
self.assertEqual(
fun_params, params, msg='wrong explicit positional parameters'
)
def _assert_pushes(
self, fun: ast.FunctionDef, name: str, statement_number: int = 0
) -> None:
py_statement = fun.body[statement_number]
self.assertIn(
'stack.append({})'.format(name),
astunparse.unparse(py_statement),
msg="doesn't push {}".format(name),
)
def _assert_pushes_all_at_once(
self, fun: ast.FunctionDef, *items: str
) -> None:
py_first_statement = fun.body[0:2]
items_str = ', '.join(items)
self.assertIn(
'stack += [{}]'.format(items_str),
astunparse.unparse(py_first_statement),
msg="doesn't push {}".format(items_str),
)
def _assert_returns_top_of_stack(self, fun: ast.FunctionDef) -> None:
py_last_statement = fun.body[-1]
message = "doesn't pop return value off stack"
self.assertIn(
'return stack.pop()',
astunparse.unparse(py_last_statement),
msg=message,
)
def _test_magic_method_basic(
self, name: str, params: Sequence[str], *pushed: str
) -> ast.FunctionDef:
fun = self._make_magic_py_method_from_name(name)
self._assert_explicit_positional_parameters_equal(fun, list(params))
for index, item in enumerate(pushed):
self._assert_pushes(fun, item, index)
return fun
def test__new__(self) -> None:
"""Test that transpiled __new__ methods take the class, stack, and stash.
def __new__ should become def __new__(cls, stack, stash) and it should push cls onto the stack before executing the rest of the function."""
self._test_magic_method_basic('new', ['cls', 'stack', 'stash'], 'cls')
def test_instance_functions_with_concat_signatures(self) -> None:
"""Test that transpiled __(init, call)__ methods take self, the stack, and the stash.
For example, def __init__ should become def __init__(self, stack, stash) and it should push self onto the stack before executing the rest of the function. The function need not return a value other than None."""
for method in {'init', 'call'}:
with self.subTest(
msg='testing __{}__'.format(method), method=method
):
self._test_magic_method_basic(
method, ['self', 'stack', 'stash'], 'self'
)
def test_self_only_methods(self) -> None:
"""Test that transpiled __(del, repr, etc.)__ methods take only self.
For example, def __del__ should become def __del__(self) and it should
push self onto the stack before executing the rest of the function.
Then, it should return stack.pop()."""
for method in {
'del',
'repr',
'str',
'bytes',
'hash',
'bool',
'dir',
'len',
'length_hint',
'aenter',
'anext',
'aiter',
'await',
'enter',
'ceil',
'floor',
'trunc',
'index',
'float',
'int',
'complex',
'invert',
'abs',
'pos',
'neg',
'reversed',
'iter',
}:
method_name = '__{}__'.format(method)
with self.subTest(
msg='testing {}'.format(method_name), method=method_name
):
py_def = self._test_magic_method_basic(
method, ['self'], 'self'
)
self._assert_returns_top_of_stack(py_def)
def test__format__(self) -> None:
"""Test that transpiled __format__ methods take only self and format_spec.
def __format__ should become def __format__(self, format_spec) and it should push format_spec and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_format_def = self._test_magic_method_basic(
'format', ['self', 'format_spec'], 'format_spec', 'self'
)
self._assert_returns_top_of_stack(py_format_def)
def test_comparisons_and_augmented_assignments(self) -> None:
"""Test that transpiled comparison/augmented assignment methods take only self and other.
For example, def __lt__ should become def __lt__(self, other) and it should push self and other onto the stack before executing the rest of the function. The function should return stack.pop().
__ipow__ is not tested here; it has a different signature."""
for method in {
'lt',
'le',
'eq',
'ne',
'gt',
'ge',
'ior',
'ixor',
'iand',
'irshift',
'ilshift',
'imod',
'ifloordiv',
'itruediv',
'imatmul',
'imul',
'isub',
'iadd',
'ror',
'rxor',
'rand',
'rrshift',
'rlshift',
'rmod',
'rfloordiv',
'rtruediv',
'rmatmul',
'rmul',
'rsub',
'radd',
'rpow',
'or',
'xor',
'and',
'rshift',
'lshift',
'mod',
'floordiv',
'truediv',
'matmul',
'mul',
'sub',
'add',
}:
with self.subTest(
msg='testing __{}__'.format(method), method=method
):
py_def = self._test_magic_method_basic(
method, ['self', 'other'], 'self', 'other'
)
self._assert_returns_top_of_stack(py_def)
def test_attribute_methods_except_setattr_and_dir(self) -> None:
"""Test that transpiled __(getattr, getattribute, etc.)__ methods take only self and name.
For example, def __getattr__ should become def __getattr__(self, name) and it should push name and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
for method in {'getattr', 'getattribute', 'delattr'}:
with self.subTest(
msg='testing __{}__'.format(method), method=method
):
py_getattr_def = self._test_magic_method_basic(
method, ['self', 'name'], 'name', 'self'
)
self._assert_returns_top_of_stack(py_getattr_def)
def test__setattr__(self) -> None:
"""Test that transpiled __setattr__ methods take only self, name, and value.
def __setattr__ should become def __setattr__(self, name, value) and it should push value, name, and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_setattr_def = self._test_magic_method_basic(
'setattr', ['self', 'name', 'value']
)
self._assert_pushes_all_at_once(
py_setattr_def, 'value', 'name', 'self'
)
self._assert_returns_top_of_stack(py_setattr_def)
def test__get__(self) -> None:
"""Test that transpiled __get__ methods take only self, instance, and owner.
def __get__ should become def __get__(self, instance, owner) and it should push owner, instance, and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_get_def = self._test_magic_method_basic(
'get', ['self', 'instance', 'owner']
)
self._assert_pushes_all_at_once(
py_get_def, 'owner', 'instance', 'self'
)
self._assert_returns_top_of_stack(py_get_def)
def test__set__(self) -> None:
"""Test that transpiled __set__ methods take only self, instance, and value.
def __set__ should become def __set__(self, instance, value) and it should push value, instance, and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_set_def = self._test_magic_method_basic(
'set', ['self', 'instance', 'value']
)
self._assert_pushes_all_at_once(
py_set_def, 'value', 'instance', 'self'
)
self._assert_returns_top_of_stack(py_set_def)
def test_methods_taking_self_and_instance(self) -> None:
"""Test that transpiled __(delete, instancecheck)__ methods take only self and instance.
For example, def __delete__ should become def __delete__(self, instance) and it should push instance and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
for method in {'delete', 'instancecheck'}:
with self.subTest(
msg='testing __{}__'.format(method), method=method
):
py_defun = self._test_magic_method_basic(
method, ['self', 'instance']
)
self._assert_pushes_all_at_once(py_defun, 'instance', 'self')
self._assert_returns_top_of_stack(py_defun)
def test__init_subclass__(self) -> None:
"""Test that transpiled __init_subclass__ methods take only cls and arbitrary keyword arguments.
def __init_subclass__ should become def __init_subclass__(cls, **kwargs) and it should push kwargs and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_init_subclass_def = self._test_magic_method_basic(
'init_subclass', ['cls']
)
py_kwarg_object = py_init_subclass_def.args.kwarg
self.assertIsNotNone(py_kwarg_object, msg='no ** argument')
py_kwarg = cast(ast.arg, py_kwarg_object).arg
self.assertEqual(py_kwarg, 'kwargs', msg='wrong ** argument')
self._assert_pushes_all_at_once(py_init_subclass_def, 'kwargs', 'self')
self._assert_returns_top_of_stack(py_init_subclass_def)
def test__prepare__(self) -> None:
"""Test that transpiled __prepare__ methods take only cls, bases, and arbitrary keyword arguments.
def __prepare__ should become def __prepare__(cls, name, bases, **kwds) and it should push kwds, bases, name, and self onto the stack before executing the rest of the function. The function should return stack.pop(). It is up to the programmer to decorate the function with @classmethod."""
py_prepare_def = self._test_magic_method_basic(
'prepare', ['cls', 'name', 'bases']
)
py_kwarg_object = py_prepare_def.args.kwarg
self.assertIsNotNone(py_kwarg_object, msg='no ** argument')
py_kwarg = cast(ast.arg, py_kwarg_object).arg
self.assertEqual(py_kwarg, 'kwds', msg='wrong ** argument')
self._assert_pushes_all_at_once(
py_prepare_def, 'kwds', 'bases', 'name', 'cls'
)
self._assert_returns_top_of_stack(py_prepare_def)
def test__subclasscheck__(self) -> None:
"""Test that transpiled __subclasscheck__ methods take only self and subclass.
def __subclasscheck__ should become def __subclasscheck__(self, subclass) and it should push subclass and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_subclasscheck_def = self._test_magic_method_basic(
'subclasscheck', ['self', 'subclass']
)
self._assert_pushes_all_at_once(
py_subclasscheck_def, 'subclass', 'self'
)
self._assert_returns_top_of_stack(py_subclasscheck_def)
def test_key_related_methods(self) -> None:
"""Test that transpiled __(getitem, missing, etc.)__ methods take only self and key.
For example, def __getitem__ should become def __getitem__(self, key) and it should push key and self onto the stack before executing the rest of the function. The function should return stack.pop().
Note: __setitem__ has a different signature."""
for method in {'getitem', 'missing', 'delitem'}:
method_name = '__{}__'.format(method)
with self.subTest(
msg='testing {}'.format(method_name), method_name=method_name
):
py_method_def = self._test_magic_method_basic(
method, ['self', 'key']
)
self._assert_pushes_all_at_once(py_method_def, 'key', 'self')
self._assert_returns_top_of_stack(py_method_def)
def test_context_manager_exit_methods(self) -> None:
"""Test that transpiled __(aexit, exit)__ methods take only self, exc_type, exc_value, and traceback.
For example, def __aexit__ should become def __aexit__(self, exc_type, exc_value, traceback) and it should push traceback, exc_value, exc_type, and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
for method in {'exit', 'aexit'}:
method_name = '__{}__'.format(method)
with self.subTest(
msg='testing {}'.format(method_name), method_name=method_name
):
expected_params = [
'self',
'exc_type',
'exc_value',
'traceback',
]
py_method_def = self._test_magic_method_basic(
method, expected_params
)
self._assert_pushes_all_at_once(
py_method_def, 'traceback', 'exc_value', 'exc_type', 'self'
)
self._assert_returns_top_of_stack(py_method_def)
def test__round__(self) -> None:
"""Test that transpiled __round__ methods take only self and ndigits.
def __round__ should become def __round__(self, ndigits) and it should push ndigits and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_method_def = self._test_magic_method_basic(
'round', ['self', 'ndigits']
)
self._assert_pushes_all_at_once(py_method_def, 'ndigits', 'self')
self._assert_returns_top_of_stack(py_method_def)
def test_pow(self) -> None:
"""Test that transpiled __[i]pow__ methods take only self, other, and modulo.
def __[i]pow__ should become def __[i]pow__(self, other, modulo=1) and it should push self, other, and modulo onto the stack before executing the rest of the function. The function should return stack.pop()."""
for method in {'pow', 'ipow'}:
method_name = '__{}__'.format(method)
with self.subTest(
msg='testing {}'.format(method_name), method_name=method_name
):
py_method_def = self._test_magic_method_basic(
method, ['self', 'other', 'modulo']
)
self.assertIsInstance(
py_method_def.args.defaults[-1],
ast.Num,
msg='modulo default is not a number',
)
self.assertEqual(
cast(ast.Num, py_method_def.args.defaults[-1]).n,
1,
msg='wrong modulo default',
)
self._assert_pushes_all_at_once(
py_method_def, 'self', 'other', 'modulo'
)
self._assert_returns_top_of_stack(py_method_def)
def test__contains__(self) -> None:
"""Test that transpiled __contains__ methods take only self and item.
def __contains__ should become def __contains__(self, item) and it should push item and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_method_def = self._test_magic_method_basic(
'contains', ['self', 'item']
)
self._assert_pushes_all_at_once(py_method_def, 'item', 'self')
self._assert_returns_top_of_stack(py_method_def)
def test__setitem__(self) -> None:
"""Test that transpiled __setitem__ methods take only self, key, and value.
def __setitem__ should become def __setitem__(self, key, value) and it should push value, key, and self onto the stack before executing the rest of the function. The function should return stack.pop()."""
py_method_def = self._test_magic_method_basic(
'setitem', ['self', 'key', 'value']
)
self._assert_pushes_all_at_once(py_method_def, 'value', 'key', 'self')
self._assert_returns_top_of_stack(py_method_def)
```
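The docstrings in the tests above fully specify the shape a transpiled magic method must take. As a rough, hypothetical sketch (not taken from the Concat transpiler itself), the `__getitem__` case they check would look something like this, assuming a `stack` list is in scope for the transpiled body:
```python
# Hypothetical transpiled form asserted by test_key_related_methods above.
def __getitem__(self, key):
    stack.extend([key, self])   # key and self pushed together, as the tests require
    ...                         # rest of the transpiled Concat definition
    return stack.pop()
```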
#### File: concat/tests/strategies.py
```python
from concat.level1.typecheck.types import (
IndividualType,
SequenceVariable,
StackItemType,
)
from hypothesis.strategies import (
SearchStrategy,
booleans,
composite,
from_type,
iterables,
lists,
register_type_strategy,
sampled_from,
)
from typing import (
Iterable,
Sequence,
Type,
)
def _iterable_strategy(type: Type[Iterable]) -> SearchStrategy[Iterable]:
@composite
def strategy(draw) -> Iterable:
if hasattr(type, '__args__') and type.__args__ == (StackItemType,):
list = []
if draw(booleans()):
list.append(draw(from_type(SequenceVariable)))
list += draw(lists(from_type(IndividualType), max_size=10))
return list
cls = draw(sampled_from([list, tuple, set, frozenset]))
return cls(
draw(iterables(getattr(type, '__args__', object), max_size=10))
)
return strategy()
def _sequence_strategy(type: Type[Sequence]) -> SearchStrategy[Sequence]:
@composite
def strategy(draw) -> Sequence:
cls = draw(sampled_from([list, tuple]))
return cls(draw(_iterable_strategy(type)))
return strategy()
register_type_strategy(Iterable, _iterable_strategy)
register_type_strategy(Sequence, _sequence_strategy)
```
#### File: concat/tests/test_example_programs.py
```python
from scripttest import TestFileEnvironment # type: ignore
import unittest
import os
import sys
import os.path
env = TestFileEnvironment('./test-output', cwd='.')
example_dir = './concat/examples'
examples = [
os.path.join(example_dir, x)
for x in os.listdir(example_dir)
if x.endswith('.cat')
]
class TestExamplePrograms(unittest.TestCase):
"""Test all the examples in concat/examples for correctness."""
def test_examples(self):
"""Test each example.
Ignored files must begin with '# IGNORE'.
Tested files each must start with '# IN: ' followed by the standard
input as a string literal, a newline, and '# OUT: ' followed by the
expected standard output.
"""
for name in examples:
with open(name) as spec, self.subTest(example=name):
inp = spec.readline()
# Ignore the file?
if inp.startswith('# IGNORE'):
continue
in_start, out_start = '# IN: ', '# OUT:'
if not inp.startswith(in_start):
raise Exception(
'No input specified for file {}'.format(name)
)
inp = eval(inp[len(in_start) :].strip())
out = spec.readline()
if not out.startswith(out_start):
raise Exception(
'No output specified for file {}'.format(name)
)
out = eval(out[len(out_start) :].strip())
# scripttest fails loudly if concat exits with a nonzero code
actual = env.run(
sys.executable,
'-m',
'coverage',
'run',
'-m',
'concat',
name,
stdin=inp.encode(),
expect_stderr=True,
)
self.assertEqual(actual.stdout, out)
```
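The docstring above pins down the two-line header every tested example file must start with. The following sketch, using a made-up input/output pair, mirrors how the test extracts and `eval`s those header lines:
```python
# Made-up example of the '# IN:' / '# OUT:' header contract enforced above.
spec_lines = ["# IN: 'hello\\n'", "# OUT: 'HELLO\\n'"]

stdin_text = eval(spec_lines[0][len('# IN: '):].strip())      # -> 'hello\n'
expected_stdout = eval(spec_lines[1][len('# OUT:'):].strip())  # -> 'HELLO\n'
assert stdin_text == 'hello\n' and expected_stdout == 'HELLO\n'
```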
#### File: concat/tests/test_semantics.py
```python
import concat.transpile
import concat.astutils
import concat.level0.parse
from concat.level0.parse import AttributeWordNode, NumberWordNode, TopLevelNode
from concat.level0.stdlib.ski import s, k, i
from concat.level0.lex import Token
from concat.level2.execute import execute
import unittest
from typing import Callable, Iterable, List, Tuple, TypeVar, Union, cast
from hypothesis import given, assume, example
from hypothesis.strategies import (
SearchStrategy,
composite,
integers,
text,
one_of,
sampled_from,
)
ProgramFragment = TypeVar('ProgramFragment', covariant=True)
ProgramFragmentAndEffect = Tuple[ProgramFragment, List[object], List[object]]
@composite
def program(
draw,
) -> ProgramFragmentAndEffect[concat.level0.parse.TopLevelNode]:
children, stack, stash = draw(suite([], []))
return concat.level0.parse.TopLevelNode(Token(), children), stack, stash
@composite
def suite(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.astutils.WordsOrStatements]:
# we don't generate level 0 import statements because the higher-level
# visitors don't accept it
stack, stash = init_stack, init_stash
count = draw(integers(min_value=0, max_value=10))
words_and_statements = []
for _ in range(count):
word_or_statement, stack, stash = draw(word(stack, stash))
words_and_statements.append(word_or_statement)
return words_and_statements, stack, stash
@composite
def word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.WordNode]:
def f(strategy: Callable[..., object]) -> SearchStrategy[object]:
return cast(SearchStrategy[object], strategy(init_stack, init_stash))
return draw(
one_of(
*map(
f,
[
number_word,
string_word,
quote_word,
name_word,
attribute_word,
push_word,
],
)
)
)
@composite
def number_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.NumberWordNode]:
number = draw(integers(min_value=-100, max_value=100))
number_token = Token('NUMBER', repr(number))
return (
concat.level0.parse.NumberWordNode(number_token),
init_stack + [number],
init_stash,
)
@composite
def string_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.StringWordNode]:
string = draw(text(max_size=100))
string_token = Token('STRING', repr(string))
return (
concat.level0.parse.StringWordNode(string_token),
init_stack + [string],
init_stash,
)
@composite
def quote_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.QuoteWordNode]:
sub_words = []
length = draw(integers(min_value=0, max_value=100))
stack, stash = init_stack, init_stash
for _ in range(length):
sub_word, stack, stash = draw(word(stack, stash))
sub_words.append(sub_word)
return concat.level0.parse.QuoteWordNode(sub_words, (0, 0)), stack, stash
@composite
def name_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.NameWordNode]:
name = draw(sampled_from('iks'))
name_token = Token('NAME', name)
return (
concat.level0.parse.NameWordNode(name_token),
*static_call(name, init_stack, init_stash),
)
@composite
def attribute_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.AttributeWordNode]:
assume(init_stack)
*stack, obj = init_stack
stash = init_stash[:]
callable_attributes = [
attr for attr in dir(obj) if callable(getattr(obj, attr))
]
assume(callable_attributes)
# callable_attributes cannot be empty here
attribute = draw(sampled_from(callable_attributes))
try:
getattr(obj, attribute)(stack, stash)
except (TypeError, ValueError):
assume(False)
attribute_token = Token('NAME', attribute)
return concat.level0.parse.AttributeWordNode(attribute_token), stack, stash
@composite
def push_word(
draw, init_stack, init_stash
) -> ProgramFragmentAndEffect[concat.level0.parse.PushWordNode]:
sub_word, stack, stash = draw(word([], []))
push_word = concat.level0.parse.PushWordNode(sub_word)
return (
push_word,
*static_push(sub_word, stack, stash, init_stack, init_stash),
)
def static_push(
word: concat.level0.parse.WordNode,
stack: List[object],
stash: List[object],
init_stack: List[object],
init_stash: List[object],
) -> Tuple[List[object], List[object]]:
if isinstance(
word,
(
concat.level0.parse.NumberWordNode,
concat.level0.parse.StringWordNode,
),
):
literal_node = cast(
Union[
concat.level0.parse.NumberWordNode,
concat.level0.parse.StringWordNode,
],
word,
)
return (
init_stack
+ [lambda stack, stash: stack.append(literal_node.value)],
init_stash,
)
if isinstance(word, concat.level0.parse.QuoteWordNode):
def pushed_quote(stack_, stash_):
return (
stack_.extend(stack),
stash_.extend(stash),
)
return init_stack + [pushed_quote], init_stash
if isinstance(word, concat.level0.parse.NameWordNode):
return init_stack + [{'s': s, 'k': k, 'i': i}[word.value]], init_stash
if isinstance(word, concat.level0.parse.AttributeWordNode):
assume(init_stack)
assume(hasattr(init_stack[-1], word.value))
return (
init_stack[:-1] + [getattr(init_stack[-1], word.value)],
init_stash,
)
# I'm not sure how to deal with pushed pushed quotations
assume(not isinstance(word, concat.level0.parse.PushWordNode))
raise TypeError(word)
def static_call(
name: str, stack: List[object], stash: List[object]
) -> Tuple[List[object], List[object]]:
stack, stash = stack[:], stash[:]
if name == 's':
assume(len(stack) >= 3)
assume(all(map(callable, stack[-3:])))
s(stack, stash)
elif name == 'k':
assume(len(stack) >= 2)
assume(all(map(callable, stack[-2:])))
k(stack, stash)
elif name == 'i':
assume(len(stack) >= 1)
assume(all(map(callable, stack[-1:])))
i(stack, stash)
else:
raise ValueError(name)
return stack, stash
def stacks_equal(
actual_stacks: Iterable[List[object]],
expected_stacks: Iterable[List[object]],
) -> bool:
return all(map(stack_equal, actual_stacks, expected_stacks))
def stack_equal(
actual_stack: List[object], expected_stack: List[object]
) -> bool:
for actual_item, expected_item in zip(actual_stack, expected_stack):
if callable(expected_item) and callable(actual_item):
stack: List[object]
stash: List[object]
stack_2: List[object]
stash_2: List[object]
stack, stash = [], []
stack_2, stash_2 = [], []
actual_item(stack, stash)
expected_item(stack_2, stash_2)
if not stacks_equal([stack, stash], [stack_2, stash_2]):
return False
else:
if actual_item != expected_item:
return False
return True
class TestDynamicSemantics(unittest.TestCase):
@example(
prog=(
TopLevelNode(
Token('ENCODING', '', (0, 0)),
[
NumberWordNode(Token('NUMBER', '0', (0, 0))),
AttributeWordNode(Token('NAME', '__init__', (0, 0))),
],
),
[],
[],
)
)
@given(program())
def test_generated_program(self, prog):
module = concat.transpile.transpile_ast(prog[0])
stack, stash = [], []
execute(
'<test_prog>',
module,
{'stack': stack, 'stash': stash, 's': s, 'k': k, 'i': i},
)
self.assertTrue(stacks_equal([stack, stash], list(prog[1:])))
``` |
{
"source": "jmanuel1/patterns",
"score": 3
} |
#### File: patterns/test/test_builder.py
```python
import unittest
import patterns.builder
class ArgCatcher:
"""Class to catch arguments passed by a builder."""
def __init__(self, **kwargs):
"""`**kwargs` passed by a builder and kept in `self.kwargs`."""
self.kwargs = kwargs
class BuilderTest(unittest.TestCase):
"""Builder test case."""
def test_builder(self):
"""Test patterns.builder.Builder class."""
builder = patterns.builder.Builder(ArgCatcher)
builder.set_string('life, the universe, and everything').set_answer(42)
arg_catcher = builder.set_question('six by nine').build()
self.assertEqual({'string': 'life, the universe, and everything',
'answer': 42,
'question': 'six by nine'},
arg_catcher.kwargs,
                         'builder does not properly receive arguments')
``` |
{
"source": "JManzur/flask-demo",
"score": 2
} |
#### File: JManzur/flask-demo/app.py
```python
from flask import Flask, render_template
from healthcheck import HealthCheck
import random
import urllib.request
app = Flask(__name__)
health = HealthCheck(app, "/status")
# list of cat images
images = [
"https://media.giphy.com/media/mlvseq9yvZhba/giphy.gif",
"https://media.giphy.com/media/VbnUQpnihPSIgIXuZv/giphy.gif",
"https://media.giphy.com/media/3nbxypT20Ulmo/giphy.gif",
"https://media.giphy.com/media/CjmvTCZf2U3p09Cn0h/giphy.gif",
"https://media.giphy.com/media/l6Td5sKDNmDGU/giphy.gif",
"https://media.giphy.com/media/CqVNwrLt9KEDK/giphy.gif",
"https://media.giphy.com/media/fjxMEdpMT9qDyBVLL4/giphy.gif",
"https://media.giphy.com/media/8JIRQqil8mvEA/giphy.gif",
"https://media.giphy.com/media/26n6xF5M2Ht4eKdO0/giphy.gif",
"https://media.giphy.com/media/xH7Yh3DSNvn4k/giphy.gif",
"https://media.giphy.com/media/GeimqsH0TLDt4tScGw/giphy.gif",
"https://media.giphy.com/media/Nm8ZPAGOwZUQM/giphy.gif"
]
def demo_available():
code = urllib.request.urlopen("http://127.0.0.1:5000").getcode()
print(code)
if code == 200:
return True, "OK"
else:
return False, "ERROR"
health.add_check(demo_available)
@app.route('/')
def index():
url = random.choice(images)
return render_template('index.html', url=url)
if __name__ == "__main__":
app.run(host="0.0.0.0")
``` |
{
"source": "JMan-Zx/game_of_hog",
"score": 3
} |
#### File: JMan-Zx/game_of_hog/game_matrix.py
```python
from functools import lru_cache
from math import comb
import decimal
from rules import free_bacon, is_swine_align, is_pig_pass
# assume standard dice
DICE_SIDE = 6
# maximum number of simultaneous rolls
MAX_ROLL = 10
# given some number of dice with a specified number of sides/faces,
# return the number of possible rolls that sum to target
@lru_cache(maxsize=None)
def roll_chance(dice_side, dice_count, target):
total = 0
# formula from: http://mathforum.org/library/drmath/view/52207.html
sum_top = (target - dice_count) // dice_side + 1
for k in range(sum_top):
total += ((-1) ** k
* comb(dice_count, k)
* comb(target - dice_side * k - 1,
dice_count - 1))
return total
# count the rolls summing to target, split by whether at least one die shows a 1
def roll_chance_special_1(dice_side, dice_count, target):
# using example of 6-sided dice
# to calculate number of events where 1 is not rolled
# but still sums to target
# modify to 5 sided die with only sides 2-6
# then transform 5-sided die with 2-6 to 5-sided die with 1-5
total_events = decimal.Decimal(roll_chance(dice_side, dice_count, target))
no_1 = decimal.Decimal(
roll_chance(dice_side - 1, dice_count, target - dice_count))
has_1 = total_events - no_1
return (has_1, no_1)
# the win_rate at a particular cell
# when taken into account alternating turns
# and additional turn rules
@lru_cache(maxsize=None)
def win_rate(new_score, opponent_score):
# win guaranteed with this score (already won)
if new_score >= 100:
return 1
# if taking another turn, use the win_rate at that cell
# equivalent to starting the turn there
if (is_swine_align(new_score, opponent_score)
or is_pig_pass(new_score, opponent_score)):
return game_matrix(new_score, opponent_score)[1]
# otherwise, opponent takes a turn,
# win_rate is equivalent to opponent not winning in that situation
return 1 - game_matrix(opponent_score, new_score)[1]
# construct the overall matrix lazily
# game_matrix(a, b), where a is score of current player
# and b is score of opponent, returns a tuple
# 1st: the optimal number of dice to roll for player a
# 2nd: the probability of player a winning if playing optimally
@lru_cache(maxsize=None)
def game_matrix(player_score, opponent_score):
assert(player_score < 100 or opponent_score < 100)
# choice does not matter, already won/lost
if player_score >= 100:
return (0, 1)
elif opponent_score >= 100:
return (0, 0)
else:
# assume 0 (free_bacon) is the best strategy
best_dice_count = 0
best_win_rate = win_rate(player_score + free_bacon(opponent_score),
opponent_score)
for dice_count in range(1, MAX_ROLL + 1):
curr_win_rate = 0
total_poss = DICE_SIDE ** dice_count
for target in range(dice_count, dice_count * DICE_SIDE + 1):
has_1, no_1 = roll_chance_special_1(
DICE_SIDE, dice_count, target)
has_1_win_rate = (has_1
* win_rate(player_score + 1, opponent_score)
/ total_poss)
no_1_win_rate = (no_1
* win_rate(player_score + target,
opponent_score)
/ total_poss)
curr_win_rate += has_1_win_rate + no_1_win_rate
assert(curr_win_rate <= 1 and curr_win_rate >= 0)
if curr_win_rate > best_win_rate:
best_dice_count = dice_count
best_win_rate = curr_win_rate
return (best_dice_count, best_win_rate)
print(game_matrix(84, 99))
```
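A few hand-checked values help confirm what the counting helpers above return (the import path simply assumes this file is importable as `game_matrix`):
```python
# Hand-checked sanity checks for the dice-counting helpers (not from the original repo).
from game_matrix import roll_chance, roll_chance_special_1

assert roll_chance(6, 1, 3) == 1     # one way to roll a 3 with a single die
assert roll_chance(6, 2, 7) == 6     # (1,6), (2,5), (3,4), (4,3), (5,2), (6,1)
has_1, no_1 = roll_chance_special_1(6, 2, 7)
assert (has_1, no_1) == (2, 4)       # only (1,6) and (6,1) contain a 1
```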
#### File: JMan-Zx/game_of_hog/rules.py
```python
pi = '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196'
def gcd(a, b):
assert(a != 0 and b != 0)
while b != 0:
a, b = b, a % b
return a
def free_bacon(opponent_score):
k = 0
if opponent_score == 0:
k = 3
else:
assert(opponent_score < 200)
k = int(pi[opponent_score + 2])
return k + 3
def is_swine_align(player_score, opponent_score):
return player_score > 0 and opponent_score > 0 and gcd(player_score, opponent_score) >= 10
def is_pig_pass(player_score, opponent_score):
return player_score < opponent_score and opponent_score - player_score < 3
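# --- Hand-checked usage examples, added here for illustration only ---
# gcd(20, 30) iterates 20,30 -> 30,20 -> 20,10 -> 10,0 and returns 10.
assert gcd(20, 30) == 10
assert is_swine_align(20, 30)        # both scores positive and gcd >= 10
assert not is_swine_align(5, 10)     # gcd is only 5
assert free_bacon(0) == 6            # k is 3 when the opponent has no points, plus 3
assert is_pig_pass(17, 18)           # behind by less than 3 points
assert not is_pig_pass(10, 20)       # behind by 10 points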
``` |
{
"source": "jmao-denver/deephaven-core",
"score": 2
} |
#### File: python/core/deephaven_jpy_init.py
```python
import dill
import base64
import jpy
import sys
import os
import pandas
import logging
# Set stdin to /dev/null to prevent functions (like help()) that attempt to read from stdin from hanging the worker.
os.dup2(os.open("/dev/null", os.O_RDONLY), 0)
jpy.VerboseExceptions.enabled = True
# If you want jpy to tell you about all that it is doing, change this
# jpy.diag.flags = jpy.diag.F_ALL
from deephaven import Config
import deephaven.TableTools as ttools
from deephaven.python_to_java import dataFrameToTable
# NOTE: **THIS IS REQUIRED** for WorkerPythonEnvironment - don't take it out...
import __main__
###############################################################################################
# can't use IsWidget.get_iris_table or the data frame will be interpreted as a table instead of a widget?
pandas.DataFrame.to_iris_table = lambda self: dataFrameToTable(self, True)
########################################################################################################
# Performance monitoring
def add_tables_from_map_to_binding(tables):
"""
Iterates through a (java) map of tables and adds them to the global binding. This is a helper method
accommodating for the fact that the performance queries return the table results as a (java) HashMap object.
:param tables: java HashMap
"""
it = tables.entrySet().iterator()
while it.hasNext():
pair = it.next()
globals()[pair.getKey()] = pair.getValue()
it.remove()
def query_update_performance_set(evaluationNumber):
"""
Name matched to Groovy equivalent for coherent usage.
    :param evaluationNumber: evaluation number identifying the query whose
        update performance tables should be fetched
    :return: None; the resulting tables are added to the global binding
"""
_jtype_ = jpy.get_type("io.deephaven.db.v2.utils.PerformanceQueries")
tableMap = _jtype_.queryUpdatePerformanceMap(evaluationNumber)
add_tables_from_map_to_binding(tableMap)
# Convenience functions for a variety of purposes
def importjava(clazz):
_jclass_ = jpy.get_type("java.lang.Class")
def _get_short_class_name_(clazz):
return _jclass_.forName(clazz).getSimpleName()
clazz = clazz.strip()
javaclass = _get_short_class_name_(clazz)
globals()[javaclass] = jpy.get_type(clazz)
db.importClass(_jclass_.forName(clazz))
def importstatic(clazz):
_jclass_ = jpy.get_type("java.lang.Class")
clazz = clazz.strip()
javaclass = jpy.get_type(clazz)
for key, value in javaclass.__dict__.items():
if not key.startswith("__"):
if hasattr(value, 'methods'):
methods = value.methods
if methods:
if methods[0].is_static:
globals()[value.name] = value
db.importClass(_jclass_.forName(clazz))
def java_array(type, values):
return jpy.array(type, values)
def IntArray(values):
return java_array('int', values)
def DoubleArray(values):
return java_array('double', values)
def FloatArray(values):
return java_array('float', values)
def LongArray(values):
return java_array('long', values)
def ShortArray(values):
return java_array('short', values)
def BooleanArray(values):
return java_array('boolean', values)
def ByteArray(values):
return java_array('byte', values)
def _exists_and_is_file_(prop):
"""
For finding a fully qualified customer configured import file
:param prop: a stem of the import file
:return: the fully qualified file path
"""
config = Config()
file_path = config.getStringWithDefault(prop, None)
if file_path:
file_path = file_path.replace("<devroot>", config.getDevRootPath())
if os.path.isfile(file_path):
return file_path
return None
def _empty_or_comment_(line):
"""helper method for extracting a line"""
return line is None or len(line.strip()) < 1 or line.strip().startswith("#")
######################################################################################################
# Perform the desired imports for the console workspace
# NOTE: these can't be moved into a method or the imports would be out of scope for the console
default_imports = _exists_and_is_file_("python.default.imports")
if default_imports:
# test if line matches pattern "import <stuff>" or "from <stuff> import <stuff>"
import re
    import_re = r'^(?:import|(from(\s+)(.+))import)(\s+)(.+)'
import_pattern = re.compile(import_re)
with open(default_imports, 'r') as f:
for line in f:
# note that this pattern is repeated, but I want to avoid a method polluting the namespace
if _empty_or_comment_(line):
# this line is empty or a comment
continue
if ";" in line:
logging.error("Can not run line \n{}\n from python.default.imports, contains a ';'".format(line))
continue
try:
if import_pattern.match(line):
exec(line)
else:
logging.error("Could not run line \n{}\n from python.default.imports, "
"does not match import pattern" .format(line))
except ImportError:
logging.error("Could not import module: {}".format(line))
del import_re, import_pattern
default_imports = _exists_and_is_file_("python.default.javaclass.imports")
if default_imports:
with open(default_imports) as f:
for line in f:
            if _empty_or_comment_(line):
continue
# noinspection PyBroadException
try:
importjava(line)
except Exception:
logging.error("Could not import java class: {}".format(line))
# clean up the workspace, since this is forwarded as the console namespace
del _empty_or_comment_, _exists_and_is_file_, default_imports
``` |
{
"source": "jmaq-cr/BMC-Pr2-RutasMetabolicas",
"score": 3
} |
#### File: BMC-Pr2-RutasMetabolicas/src/semiglobal_alignment.py
```python
import numpy as np
import global_alignment
MISMATCH = -1
MATCH = 1
GAP = -2
# FILL THE FIRST ROW AND THE FIRST COLUMN OF THE MATRIX WITH THE CORRESPONDING VALUES
def fill_first_values():
# FILL FIRST ROW
first_line = global_alignment.matrix[0]
for column in range(1, len(first_line)):
first_line[column] = first_line[column-1] + 0
global_alignment.matrix[0] = first_line
# FILL FIRST COLUMN
for row in range(1, len(global_alignment.matrix)):
global_alignment.matrix[row][0] = global_alignment.matrix[row-1][0]+ 0
# GET THE MAXIMUM NUMBER FROM THE LAST ROW AND THE LAST COLUMN
def max_number_semiglobal():
amount_rows = len(global_alignment.matrix)-1
num_max_row = max(global_alignment.matrix[amount_rows])
last_numb_col = []
i = 0
while amount_rows >= 0:
amount_rows = amount_rows + -1
row = global_alignment.matrix[i]
i = i + 1
last_numb_col += [row[-1]]
col_max = max(last_numb_col)
num_max = max(col_max, num_max_row)
return num_max
# BUILD THE OPTIMAL ALIGNMENT FROM THE MATRIX SCORES
def traceback(sequence1, sequence2):
alignments = []
alignmentA = ""
alignmentB = ""
i = len(sequence2)-1
j = len(sequence1)-1
max_number = max_number_semiglobal()
try:
j = global_alignment.matrix[len(global_alignment.matrix)-1].index(max_number)
i = len(global_alignment.matrix)-1
except:
j = len(global_alignment.matrix[0])-1
i=0
while True:
if(global_alignment.matrix[i][j]==max_number):
break
i += 1
while(i>0 or j>0):
if(i>0 and j>0 and global_alignment.matrix[i][j] == global_alignment.matrix[i-1][j-1] +
global_alignment.getScore(sequence1, sequence2, i, j)):
alignmentA = sequence1[j] + alignmentA
alignmentB = sequence2[i] + alignmentB
global_alignment.matrix[i][j] = 'D'+str(global_alignment.matrix[i][j])
i = i-1
j = j-1
elif((i>0 and global_alignment.matrix[i][j] == global_alignment.matrix[i-1][j] + global_alignment.GAP) or j==0):
alignmentA = "--" + alignmentA
alignmentB = sequence2[i] + alignmentB
global_alignment.matrix[i][j] = "A" + str(global_alignment.matrix[i][j])
i = i-1
elif((j>0 and global_alignment.matrix[i][j] == global_alignment.matrix[i][j-1] + global_alignment.GAP) or i==0):
alignmentA = sequence1[j] + alignmentA
alignmentB = "--" + alignmentB
global_alignment.matrix[i][j] = "I"+str(global_alignment.matrix[i][j])
j = j-1
alignments.append(alignmentA)
alignments.append(alignmentB)
global_alignment.add_sequences(sequence1, sequence2)
return alignments
# RUN THE COMPLETE ALGORITHM
def semiglobal_alignment(sequence1, sequence2):
global_alignment.clean_matrix()
    global_alignment.init_matrix(sequence1, sequence2) # INITIALIZE THE SCORE MATRIX WITH ZEROS
    fill_first_values() # FILL THE FIRST ROW AND THE FIRST COLUMN
    global_alignment.fill_matrix(sequence1, sequence2) # COMPUTE ALL THE SCORES
    new_col = max_number_semiglobal()
    alignments = traceback(["--"]+sequence1,["--"]+sequence2) # GET THE OPTIMAL ALIGNMENT
alignments.append(global_alignment.alignmentScore(alignments[0], alignments[1]))
return alignments
#UPDATE: Test case for Metabolic-Pathways, by @kecastro.
#ROUTE0 = 'ABCDEFGHIJ'
#ROUTE1 = 'KLMNOPQRST'
#print(semiglobal_alignment(ROUTE0, ROUTE1))
``` |
{
"source": "jmarangola/cv-chess",
"score": 3
} |
#### File: robotics/control/kinematics.py
```python
import robot_parameters
from robot_parameters import A1, A2
import numpy as np
import math
from numpy import random
from scipy.spatial import distance
def get_theta_two(x, y):
"""
Get theta2 angle for robot arm given (x, y) world position
Args:
x (double): end effector x position
y (double): end effector y position
Returns:
double: theta2 value (in radians)
"""
x_sq = math.pow(x, 2)
y_sq = math.pow(y, 2)
return np.arccos((x_sq + y_sq - math.pow(A1, 2) - math.pow(A2, 2))/(2 * A1 * A2))
def get_theta_one(x, y, thet2):
"""
Get theta1 angle of robot arm for a given (x, y, theta2) configuration (theta2 must be pre-computed).
Args:
x (double): end effector x position
y (double): end effector y position
thet2 (double): theta2 position defined relative to theta 1 (in radians)
Returns:
double: theta1 position (in radians)
"""
    tx = A2 * np.sin(thet2) * x + (A1 + A2 * np.cos(thet2)) * y
ty = (A1 + A2 * np.cos(thet2)) * x - A2 * np.sin(thet2) * y
return np.arctan2(tx, ty)
def inv_theta(pos):
"""
Inverse kinematic computation of (theta1, theta2) from (x, y) world position
Args:
pos (list): List of end effector position doubles [x, y]
Returns:
list: List of joint positions [theta1, theta2]
"""
x, y = pos[0], pos[1]
thet2 = get_theta_two(x, y)
return (get_theta_one(x, y, thet2), thet2)
def linear_motion_seq(pos):
tmp_pos = inv_theta(pos[1:])
angles = []
for i in range(len(tmp_pos)):
angles.append(tmp_pos[i] * robot_parameters.RR[i] * 200 * robot_parameters.RR[i].MICROSTEP_REV[i])
return [pos[0], angles[0], angles[1]]
def closest_reference(point, points):
    """Return the entry of `points` with the smallest Euclidean distance to `point`."""
    closest_index = distance.cdist([point], points).argmin()
    return points[closest_index]
def brute_lookup():
pass
```
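A minimal usage sketch of the inverse-kinematics helpers above; the target coordinates are placeholders, since the reachable workspace depends on the A1/A2 link lengths defined in `robot_parameters`:
```python
# Minimal usage sketch (coordinates are illustrative placeholders).
from kinematics import inv_theta

target_xy = [150.0, 100.0]              # desired end-effector (x, y), same units as A1/A2
theta1, theta2 = inv_theta(target_xy)   # joint angles in radians (NaN if unreachable)
print(f"theta1={theta1:.3f} rad, theta2={theta2:.3f} rad")
```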
#### File: core/robotics/teensy_serial.py
```python
import serial
from time import sleep
PORT = "/dev/tty.usbmodem101396701"
def send_wait_for_tx(ser, resp_msg, maxTimeMS=60000, port=PORT, delay=0.001):
i = 0
while True:
res = ser.readline().decode()
if i >= maxTimeMS:
print("Maximum iterations reached. Exiting wait_for_response: -1")
return False
if res == resp_msg:
return True
sleep(delay)
i += delay
def send_serial(raw_position):
with serial.Serial() as ser:
ser.port="/dev/tty.usbmodem101396701"
ser.baudrate = 115200
ser.open()
tx = ",".join(raw_position) + "\n"
tx = tx.encode()
ser.write(tx)
ser.flush()
rx = ser.readline()
send_wait_for_tx(ser, "recd")
```
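A short usage sketch for `send_serial`: it joins its fields with commas, so every element must already be a string (the values below are made up, and their meaning is defined by the Teensy firmware):
```python
# Usage sketch with made-up field values.
from teensy_serial import send_serial

send_serial(["1", "3200", "-1600"])   # sent as "1,3200,-1600\n", then waits for the "recd" reply
```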
#### File: core/vision/collection.py
```python
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
#from realsense_utils import RealSenseCamera
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
import uuid
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST= r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly setup the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def fen_to_dict(string):
    """Convert the piece-placement field of a FEN string into a dict mapping
    board squares ('A1'..'H8') to numeric piece codes, with 0 for an empty square."""
name_to_num = {
'p' : 1,
'b' : 2,
'n' : 3,
'r' : 4,
'q' : 5,
'k' : 6,
}
out = {}
letters = "ABCDEFGH"
for i in range(8):
for j in range(1,9):
out[letters[i] + str(j)] = 0
string = string.split('/')
new_string = []
for s in string:
for d in s:
if d.isnumeric():
ix = s.index(d)
for i in range(int(d)-1):
s = s[0:ix] + '1' + s[ix:]
new_string.append(s)
for i in range(8, 0, -1):
for j in range(8):
if new_string[8-i][j].isnumeric():
out[letters[j] + str(i)] = 0
else:
out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()]
return out
def get_sorted_time_saved(images):
"""
    Given a list of image filenames, return (filename, time written to disk) pairs sorted by write time.
    Purpose: for debugging dataset
    Args:
        images (list): List of image filenames
    Returns:
        list: (filename, DateTime) tuples sorted by write time; also dumped to datetimes.json
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
def del_batch_from_text_file(file):
filenames = []
with open(file, "r") as rd:
for line in rd.readlines():
# parse each line for file to delete:
commaIndex = line.index(",")
filename = line[:commaIndex]
os.remove(TMP_DEST + filename)
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
        resp = input("[new] for a new FEN, [any other key to take an image] >")
if resp == "new":
fen = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
```
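`fen_to_dict` is easiest to understand from a tiny hand-checked case, a lone white king on e1:
```python
# Hand-checked example for fen_to_dict (assumes collection.py's own imports resolve).
from collection import fen_to_dict

board = fen_to_dict("8/8/8/8/8/8/8/4K3")
assert board["E1"] == 6      # 6 is the king code in name_to_num
assert board["A1"] == 0      # every other square maps to 0 (empty)
```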
#### File: vision/datasets/dataloader.py
```python
import numpy as np
from enum import Enum
from numpy.core.fromnumeric import resize
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
from time import sleep
import os
# Chessboard tile color enum
class TileColor(Enum):
BLACK = 1
WHITE = 2
# Chess piece enum
class PieceType(Enum):
PAWN = 1
KNIGHT = 2
BISHOP = 3
ROOK = 4
QUEEN = 5
KING = 6
# An object that encapsulates the piece and piece color associated with that piece at a chess tile (both are None in absence of piece)
class ChessPiece:
def __init__(self, piece_type=None, piece_color=None):
self.piece_type = piece_type
self.piece_color = piece_color
# Return array representation of Tile object
def to_array(self):
return [self.position, self.piece, self.tile_color]
# Class for color of chess pieces
class PieceColor(Enum):
ORANGE = 1
BLUE = 2
# Chessboard Class used to easily access any attributes of the chessboard or the pieces on the chessboard
class Board:
# Dictionary of {Position : TileColor} values
CHESS_TILES = {list("ABCDEFGH")[x]+str(y):TileColor((y+x+1)%2+1) for x in range(len(list("ABCDEFGH"))) for y in range(1, 9)}
VISUAL_PATH = r"../../resources/visual"
def __init__(self):
self.board = {}
self.n_pieces = 0
"""
Add a single piece to the board @ a position (ex. "A1")
Parameters: piece_data (dict) format: {position : Tile()}
"""
def add_pieces(self, pieces_dict):
for key in pieces_dict:
# Increment piece number if it is a new square:
if key not in self.board:
self.n_pieces += 1
# Add piece to dictionary self.board:
self.board[key.upper()] = pieces_dict[key]
"""
Function for visualizing chessboard objects as images
"""
def display_board(self, dest=None):
# Create base chess board of B/W pixels
tile_colors = np.zeros(64*3).reshape(8, 8, 3)
tile_colors[1::2, :-1:2, :] = 1
tile_colors[::2, fc00:db20:35b:7399::5, :] = 1
# Scale up board to 1024x1024:
resized_board = cv2.resize(tile_colors, (1024, 1024), 0, 0, interpolation=cv2.INTER_NEAREST)
x_translation = {x:ord(x) - ord("A") for x in list("ABCDEFGH")}
# Labels for visual board
visual_labels = {
PieceType.PAWN: "P",
PieceType.KING: "K",
PieceType.KNIGHT: "KN",
PieceType.BISHOP: "B",
            PieceType.QUEEN: "Q",
            PieceType.ROOK: "R",  # added; without this entry display_board raises KeyError for rooks
}
visual_colors = {PieceColor.ORANGE : (0, 0, 255), PieceColor.BLUE : (255, 0, 0)}
for position in self.board:
if self.board[position] is not None:
center_x = (int(x_translation[position[0]])) * 1024//8 + 1024//32
center_y = 1024 - (int(position[1]) - 1) * 1024//8 - 1024//32
font = cv2.FONT_HERSHEY_SIMPLEX
# Label board with proper pieces and colors
if self.board[position].piece_type != PieceType.KNIGHT:
cv2.putText(resized_board, visual_labels[self.board[position].piece_type], (center_x,center_y), font, 3, visual_colors[self.board[position].piece_color], 2, cv2.LINE_8)
# Knight label has two characters, ensure it is centered
else:
cv2.putText(resized_board, visual_labels[self.board[position].piece_type], (center_x-35,center_y), font, 3, visual_colors[self.board[position].piece_color], 2, cv2.LINE_8)
if dest is None:
cv2.imshow("cv-chessboard test", resized_board)
# Wait to kill the board image
cv2.waitKey(100000)
else:
cv2.imwrite(dest, 255*resized_board)
"""
Returns the ChessPiece at a position, returns None if empty
"""
def get_chess_piece(self, position):
return self.board.get(position, None)
"""
Get color of the chessboard at a position
Returns: TileColor object
"""
def get_tile_color(self, position):
return self.CHESS_TILES[position.upper()]
"""
Return a .csv representation of the board
"""
def to_csv(self):
datafr = pd.DataFrame(
{ "Position" : [position for position in sorted(self.board.keys())],
"Piece Type" : [self.board[position].piece_type.name for position in sorted(self.board.keys())],
"Piece Color" : [self.board[position].piece_color.name for position in sorted(self.board.keys())],
"Tile Color" : [self.get_tile_color(position).name for position in sorted(self.board.keys())]
}
)
return datafr
```
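A short usage example for the `Board` metadata class defined above (the output file name is arbitrary):
```python
# Usage example for the Board metadata class.
from dataloader import Board, ChessPiece, PieceType, PieceColor

board = Board()
board.add_pieces({
    "A1": ChessPiece(PieceType.PAWN, PieceColor.BLUE),
    "E4": ChessPiece(PieceType.QUEEN, PieceColor.ORANGE),
})
print(board.get_tile_color("A1"))              # TileColor.BLACK
print(board.to_csv())                          # DataFrame with one row per occupied square
board.display_board(dest="board_preview.jpg")  # write the labelled board image to disk
```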
#### File: core/vision/uploader.py
```python
from logging import root
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from tqdm import tqdm
import pandas as pd
import queue
import sys
import os
from os.path import isfile, join
from time import perf_counter
import collection
import cv2
LOCAL_PATH_TO_TMP = "/Users/johnmarangola/Desktop/repos/cv-chess/core/vision/tmp/"
DATASET_METADATA_FILENAME = "my_csv.csv"
METADATA_FIELDS = ["File", "Piece Color", "Piece Type", "Position", "ID", "Batch ID"]
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter
import preprocessing as pre
def get_id(drive, name):
"""
Get the ID of a file in Google Drive
Args:
name (str): Filename
Returns:
str: Google drive file ID
"""
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
ids = []
for file1 in file_list:
if file1["title"] == name:
ids.append(file1["id"])
if len(ids) == 1: return ids[0]
return None
def download(drive, filename):
"""
    Download a file from the root directory of Google Drive
    Args:
        drive (GoogleDrive object): Access to Google Drive
        filename (str): Filename to download
    Returns:
        bool: True if the file was found and downloaded, False otherwise
    """
    _id = get_id(drive, filename)
if _id is None:
return False
temp = drive.CreateFile({'id':_id})
temp.GetContentFile(filename)
return True
def upload_as_child(drive, filename, folder_id):
"""
Upload a file to a parent folder
Args:
drive (GoogleDrive object): Access to Google Drive
filename (str): Name of file to be uploaded
folder_id (str): Parent folder drive ID
Returns:
GoogleDriveFile: Uploaded file
"""
image_file = drive.CreateFile({'parents': [{'id': folder_id}]})
image_file.SetContentFile(filename)
image_file.Upload()
return image_file
def create_root_folder(drive, name):
"""
Create a root folder in Google Drive
Args:
drive (GoogleDrive object): Access to google drive
name (str): Folder name
Returns:
str: Folder ID
"""
for file in drive.ListFile({'q': f"'root' in parents and trashed=false"}).GetList():
if file['title'] == name:
return None
root_folder = drive.CreateFile({'title':name, 'mimeType':"application/vnd.google-apps.folder"})
root_folder.Upload()
return root_folder['id']
def add_sub_directory(drive, parent_id, sub_dir):
"""
Add subfolder to parent directory
Args:
drive (GoogleDrive object): Access to google drive
parent_id (str): ID of parent directory
sub_dir (str): Name of subfolder
Returns:
str: ID of subfolder
"""
# check to make sure sub-directory does not exist yet:
for file in drive.ListFile({'q': f"'{parent_id}' in parents and trashed=false"}).GetList():
if file['title'] == sub_dir:
return False
sub_dir = drive.CreateFile({'title':sub_dir,"parents":[{'id':parent_id}],'mimeType':"application/vnd.google-apps.folder"})
sub_dir.Upload()
return sub_dir['id']
def upload_local_dataset(dataset_name, folder_id, local_path=LOCAL_PATH_TO_TMP, metadata_filename=DATASET_METADATA_FILENAME):
"""
    Upload a local dataset into the Google Drive folder identified by folder_id.
    Args:
        dataset_name (str): Name of dataset to be uploaded to Google Drive.
        folder_id (str): Google drive ID of folder that the dataset is uploaded within.
        local_path (str, optional): Local absolute path of cv-chess/core/vision/tmp/. Defaults to LOCAL_PATH_TO_TMP.
        metadata_filename (str, optional): Name of metadata file (includes .csv). Defaults to DATASET_METADATA_FILENAME.
    Returns:
        None after a successful upload; False if the local metadata file could not be loaded
"""
# Read in local metadata
os.chdir(local_path)
try:
local_meta = pd.read_csv(metadata_filename)
except:
print(f"Unable to load {metadata_filename} from {LOCAL_PATH_TO_TMP}. Exiting...")
return False
# Walk through directory, finding valid files to upload
im_upload = []
for file in os.listdir(local_path):
if file.endswith(".jpg") and file[0] == "f":
im_upload.append(file)
# initialize empty queue
#q = queue.Queue()
t1 = perf_counter() # Start runtime clock
    # Concurrently execute file uploads using a thread pool of 50 workers
with ThreadPoolExecutor(max_workers=50) as executor:
for file in tqdm (im_upload, desc="Threading upload", ascii=False, ncols=100):
executor.submit(push_to_drive_as_child, drive, local_meta, file, folder_id)
# Dequeue drive ids, adding each to metadata as it is popped from the queue
#while not q.empty():
# _row, _id = q.get()
# local_meta.at[_row, "ID"] = _id
t1 -= perf_counter()
# Clean up dataframe from auto-add during copying and writing operations
#for col in local_meta.columns.tolist():
# Remove any column that is not an essential metadata field
# if col not in METADATA_FIELDS:
# del local_meta[col]
local_meta.to_csv(path_or_buf=local_path + metadata_filename)
# Upload metadata to google drive
upload_as_child(drive, metadata_filename, folder_id)
print(f"Total upload time: {abs(t1)}s")
def upload_new_dataset(dataset_name, local_path=LOCAL_PATH_TO_TMP, metadata_filename=DATASET_METADATA_FILENAME):
"""
Upload a new dataset to folder in Google Drive
Args:
dataset_name (str): Name of new dataset folder
local_path (str, optional): Path to cv-chess/core/vision/. Defaults to "/Users/johnmarangola/Desktop/repos/cv-chess/core/vision/".
Returns:
boolean: True if dataset successfully uploaded, False otherwise.
"""
drive = authenticate()
if get_id(drive, dataset_name) is not None:
print(f"Dataset {dataset_name} already exists. Exiting...")
return False
root_id = create_root_folder(drive, dataset_name)
if root_id is None:
        print("Error: failed to create the dataset folder in Google Drive.")
return False
# Upload the dataset from local to Drive
return upload_local_dataset(dataset_name, root_id, local_path=LOCAL_PATH_TO_TMP, metadata_filename=DATASET_METADATA_FILENAME)
def add_to_existing_dataset(dataset_name, local_path=LOCAL_PATH_TO_TMP, cloud_metadata_filename=DATASET_METADATA_FILENAME):
drive = authenticate()
folder_id = get_id(drive, dataset_name)
# Check to ensure that the dataset folder exists in Google Drive
if folder_id is None:
print(f"Dataset {dataset_name} not found")
return False
folder_id_string = "\'" + folder_id + "\'" + " in parents and trashed=false"
file_list = drive.ListFile({'q': folder_id_string}).GetList()
metadata_id = None
# Iterate through dataset directory, searching for metadata filename
for file in file_list:
if file['title'] == cloud_metadata_filename:
metadata_id = file['id']
metadata_file = drive.CreateFile({'id':metadata_id})
metadata_file.GetContentFile(cloud_metadata_filename)
break
# Exit if could not find metadata .csv
if metadata_id is None:
print("Metadata .csv not found. Exiting...")
sys.exit()
cloud_metadata_df = pd.read_csv(cloud_metadata_filename)
os.chdir(local_path)
try:
local_meta = pd.read_csv(cloud_metadata_filename)
except:
print(f"Unable to load metadata file {cloud_metadata_filename} from {LOCAL_PATH_TO_TMP}. Exiting...")
return False
# Walk through directory, finding valid files to upload
im_upload = []
for file in os.listdir(local_path):
if file.endswith(".jpg") and file[0] == "f":
im_upload.append(file)
# initialize empty queue
q = queue.Queue()
t1 = perf_counter() # Start runtime clock
    # Concurrently execute file uploads using a thread pool of 25 workers
with ThreadPoolExecutor(max_workers=25) as executor:
for file in tqdm (im_upload, desc="Threading upload", ascii=False, ncols=100):
executor.submit(push_to_drive_as_child, drive, local_meta, file, folder_id, q)
# Dequeue drive ids, adding each to metadata as it is popped from the queue
while not q.empty():
_row, _id = q.get()
local_meta.at[_row, "ID"] = _id
t1 -= perf_counter()
temp_frames = [cloud_metadata_df, local_meta]
resulting_dataframe = pd.concat(temp_frames)
# Clean up dataframe from auto-add during copying and writing operations
for col in resulting_dataframe.columns.tolist():
# Remove any column that is not an essential metadata field
if col not in METADATA_FIELDS:
del resulting_dataframe[col]
resulting_dataframe.to_csv(path_or_buf=local_path + cloud_metadata_filename)
# Upload metadata to google drive
upload_as_child(drive, cloud_metadata_filename, folder_id)
print(f"Total upload time: {abs(t1)}s")
return True
def authenticate(creds_path=LOCAL_PATH_TO_TMP[:-4]):
"""
Authenticate for upload
Args:
creds_path (str, optional): Path to credentials. Defaults to LOCAL_PATH_TO_TMP[:-4].
Returns:
GoogleDrive object: Google drive context object for authenticated user
"""
# Run authentication:
gauth = GoogleAuth()
os.chdir(creds_path)
# Try to load saved client credentials
gauth.LoadCredentialsFile("mycreds.txt")
# Authenticate if they're not there
if gauth.credentials is None:
gauth.LocalWebserverAuth()
# Refresh them if expired
elif gauth.access_token_expired:
gauth.Refresh()
# Initialize the saved creds
else:
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile("mycreds.txt")
# Create a drive object
drive = GoogleDrive(gauth)
return drive
def push_to_drive_as_child(drive, local_meta, filename, parent_id):
"""
Upload an image to Google Drive and store drive file id in queue. Concurrently executed by many threadpool workers in parallel.
Args:
        drive (GoogleDrive object): Access to Google Drive
local_meta (pandas.DataFrame): Pandas dataframe of metadata
filename (str): Filename of file that is being uploaded
parent_id (str): Google drive Id of the folder that the image is being uploaded to
#//q (queue.Queue): Queue of [row, id] pairs of uploaded images
"""
file = drive.CreateFile({'parents': [{'id': parent_id}]})
file.SetContentFile(filename)
file.Upload()
#id = file["id"]
#temp = local_meta.index[local_meta["File"]==filename].tolist()
# Add drive file id to meta_data csv iff metadata has been correctly preprocessed for upload
#if len(temp) != 1:
# print("Exiting, input .csv not properly formatted")
# sys.exit() # Terminate all execution
#row = temp[0]
#q.put([row, id])
def upload_iphone_dataset(drive, path_to_iphone_raw_data, csv_path, FEN, local_path=LOCAL_PATH_TO_TMP):
os.chdir(path_to_iphone_raw_data)
files = [f for f in os.listdir(os.getcwd()) if isfile(os.path.join(os.getcwd(), f))]
print(f"number of raw images: {len(files)}")
# make sure FEN is all uppercase
FEN = FEN.upper()
df = pd.DataFrame()
try:
        df = pd.read_csv(csv_path, header=None)  # the csv is written without a header or index
except:
print("Could not find existing .csv, initializing empty .csv...")
for file in files:
# Get dict of positions
temp_dict = collection.fen_to_dict(FEN)
img = cv2.imread(file)
tiles = pre.board_to_64_files(img, temp_dict, base_directory=local_path) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
df.to_csv(local_path + 'my_csv.csv', header=False, index=False)
def push_to_drive(drive, local_meta, filename, q):
"""
Push a file to root directory of Drive
Args:
drive (GoogleDrive object): Access to Google Drive
local_meta (pandas.DataFrame): Image metadata dataframe
filename (str): Name of .jpg image to be uploaded (includes '.jpg')
q (queue.Queue): Queue of [row, id] pairs uploaded
"""
file = drive.CreateFile()
file.SetContentFile(filename)
file.Upload()
id = file["id"]
temp = local_meta.index[local_meta["File"]==filename].tolist()
# Add drive file id to meta_data csv
if len(temp) != 1:
print("Exiting, input .csv not properly formatted")
sys.exit()
row = temp[0]
local_meta.at[row, "ID"] = id
q.put([row, id])
if __name__ == "__main__":
drive = authenticate()
#upload_new_dataset("realsense_dataset1")
upload_iphone_dataset(drive, "/Users/johnmarangola/Desktop/repos/cv-chess/core/vision/iphone", LOCAL_PATH_TO_TMP + "my_csv.csv", "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper())
``` |
{
"source": "jmarbasura/gimme-aws-creds",
"score": 3
} |
#### File: gimme-aws-creds/tests/test_config.py
```python
import argparse
import unittest
from mock import patch
from nose.tools import assert_equals
from gimme_aws_creds.config import Config
from gimme_aws_creds import version
class TestConfig(unittest.TestCase):
"""Class to test Config Class.
Mock is used to mock external calls"""
def setUp(self):
"""Set up for the unit tests"""
self.config = Config()
def tearDown(self):
"""Run Clean Up"""
self.config.clean_up()
@patch(
"argparse.ArgumentParser.parse_args",
return_value=argparse.Namespace(
username="ann",
configure=False,
profile=None,
insecure=False,
resolve=None,
mfa_code=None,
register_device=False,
list_profiles=False,
remember_device=False,
),
)
def test_get_args_username(self, mock_arg):
"""Test to make sure username gets returned"""
self.config.get_args()
assert_equals(self.config.username, "ann")
``` |
{
"source": "jmarca/initial-solution",
"score": 4
} |
#### File: initial-solution/src/read_csv.py
```python
import pandas as pd
import numpy as np
import re
def load_demand_from_csv(filename):
"""extract a usable data structure from a csv file
Args:
filename (str): the input csv file to read. will be read with pandas.read_csv(filename)
Returns: a pandas.DataFrame you can use, or just save as json for future runs
"""
demand = pd.read_csv(filename,names=['from_node','to_node','early','late'],header=0)
return demand
def load_matrix_from_csv(filename):
"""extract a usable data structure from a csv file
Args:
filename (str): the input csv file to read. will be read with pandas.read_csv(filename)
Returns: a pandas.DataFrame you can use, or just save as json for future runs
"""
matrix = pd.read_csv(filename,header=None)
return matrix
def travel_time(speed,matrix):
"""convert the distance matrix into a travel time matrix"""
return matrix.copy().floordiv(speed)
```
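A usage sketch for the readers above, using the same files the test suite points at:
```python
# Usage sketch; file paths match the ones used in test/test_output.py.
import read_csv as reader

demand = reader.load_demand_from_csv('test/data/demand.csv')
matrix = reader.load_matrix_from_csv('test/data/matrix.csv')
times = reader.travel_time(60, matrix)   # element-wise floor division of distances by speed 60
print(demand.head())
print(times.iloc[:3, :3])
```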
#### File: initial-solution/test/test_output.py
```python
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
from functools import partial
import solution_output as SO
import evaluators as E
import demand as D
import vehicles as V
import model_run as MR
import initial_routes as IR
import read_csv as reader
# hack to capture stdout to a string, to test it
import io, sys
from contextlib import contextmanager
import os
import filecmp
output_file = 'test_output.txt'
second_output_file = 'test_output_1.txt'
third_output_file = 'test_output_2.txt'
expected_file = 'test/data/expected_test_output.txt'
expected_breaks_file = 'test/data/expected_test_breaks_output.txt'
class MockArgs():
def __init__(self):
self.speed = 60
self.summary_output = output_file
@contextmanager
def redirected(out=sys.stdout, err=sys.stderr):
saved = sys.stdout, sys.stderr
sys.stdout, sys.stderr = out, err
try:
yield
finally:
sys.stdout, sys.stderr = saved
def test_output():
horizon = 20000
m = reader.load_matrix_from_csv('test/data/matrix.csv')
odpairs = reader.load_demand_from_csv('test/data/demand.csv')
d = D.Demand(odpairs,m,horizon)
m = d.generate_solver_space_matrix(m)
v = V.Vehicles(5,horizon)
# (assignment,routing,manager) = MR.model_run_nobreaks3(d,m,v)
(assignment,routing,manager) = MR.model_run_nobreaks(d,m,v.vehicles)
assert assignment
out = io.StringIO()
err = io.StringIO()
args = MockArgs()
with redirected(out=out, err=err):
out.flush()
err.flush()
SO.print_solution(d,m,m,
v,manager,routing,assignment,horizon,
0,args
)
output = out.getvalue()
expected_output = ""
assert output == expected_output
assert filecmp.cmp(output_file,expected_file)
# make sure output file was created as directed
assert os.path.exists(args.summary_output)
# write details again, and this time there should be a _1 version of args.summary_output
assert not os.path.exists(second_output_file)
SO.print_solution(d,m,m,
v,manager,routing,assignment,horizon,
0,args
)
# created alternate named file
assert os.path.exists(second_output_file)
assert filecmp.cmp(output_file,second_output_file)
# now try again without the file
out = io.StringIO()
err = io.StringIO()
args.summary_output = None
with redirected(out=out, err=err):
out.flush()
err.flush()
SO.print_solution(d,m,m,
v,manager,routing,assignment,horizon,
0,args
)
output = out.getvalue()
f = open(expected_file, "r", encoding="utf-8")
expected_output = f.read()
assert output == expected_output
assert not os.path.exists(third_output_file)
os.unlink(output_file)
os.unlink(second_output_file)
# reset args to dump output file
args = MockArgs()
# test when run with breaks
x_m = d.insert_nodes_for_breaks(m)
trip_chains = IR.initial_routes_2(d,v.vehicles,x_m)
initial_routes = [v for v in trip_chains.values()]
(assignment,routing,manager) = MR.model_run(d, x_m, v.vehicles,
10000, None, initial_routes)
SO.print_solution(d,x_m,x_m,
v,manager,routing,assignment,horizon,
10000,args
)
assert filecmp.cmp(output_file,expected_breaks_file)
os.unlink(output_file)
``` |
{
"source": "JMarcan/computer_vision_perception",
"score": 3
} |
#### File: computer_vision_perception/image_classifier/model_lib.py
```python
import torch
from torch import nn
from collections import OrderedDict
from torchvision import datasets, transforms, models
def save_checkpoint(model, checkpoint_path, output_categories):
'''
Save the trained deep learning model
Args:
model: trained deep learning model to be saved
checkpoint_path(str): file path where model will be saved
output_categories(int): number of output categories recognized by the model
Returns:
None
'''
model.cpu()
torch.save({'arch': 'vgg16',
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx,
'output_categories': output_categories
},checkpoint_path)
def load_checkpoint(checkpoint_path, device='cuda'):
'''
Loads trained deep learning model
Args:
checkpoint_path(str): file path where model will be saved
Returns:
model: loaded deep learning model
'''
check = torch.load(checkpoint_path, map_location=device)
if check['arch'] == 'vgg16':
model = models.vgg16(pretrained = True)
elif check['arch'] == 'vgg13':
model = models.vgg13(pretrained = True)
else:
print("Error: LoadCheckpoint - Model not recognized")
return 0
output_categories = 2
try:
if check['output_categories'] >= 2:
output_categories = check['output_categories']
else:
print("Error: LoadCheckpoint - Saved model output categories has invalid value ({0}). Value needs to be 2 or higher.".format(check['output_categories']))
return 0
except Exception as e: # when ['output_categories'] is not part of save model
print("Error: LoadCheckpoint - Saved model does not contain information about output categories: {0}".format(e))
return 0
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = check['class_to_idx']
model.classifier = load_classifier(model, output_categories)
model.load_state_dict(check['state_dict'])
return model
def load_classifier(model, output_categories):
'''
Loads the classifier that we will train
Args:
model: deep learning model for which we create the classifier
output_categories(int): number of output categories
recognized by the model
Returns:
classifier: loaded classifier for a given model
'''
'''
# VGG16 classifier structure:
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace)
(2): Dropout(p=0.5)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace)
(5): Dropout(p=0.5)
(6): Linear(in_features=4096, out_features=1000, bias=True)
'''
#Classifier parameters
classifier_input = model.classifier[0].in_features # input layer of vgg16 has 25088 features
classifier_hidden_units = 4096 # 4096 default model value
classifier = nn.Sequential(
nn.Linear(classifier_input, classifier_hidden_units, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(classifier_hidden_units, output_categories),
nn.LogSoftmax(dim=1)
# LogSoftmax outputs log-probabilities; exponentiating them gives class probabilities that sum to 1 \
# - so the model's confidence for a given class can be read as a value between 0 and 100%
)
return classifier
```
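A minimal usage sketch for the two checkpoint helpers above. The checkpoint path and the two-class label mapping are illustrative assumptions, not values taken from the project.
```python
# Hypothetical example: build, save, and restore a classifier with the helpers above.
# The checkpoint path and the {"cat": 0, "dog": 1} mapping are assumptions for illustration.
from torchvision import models
import model_lib

checkpoint_path = "assets/model/checkpoint_example.pth"

model = models.vgg16(pretrained=True)
model.classifier = model_lib.load_classifier(model, output_categories=2)
model.class_to_idx = {"cat": 0, "dog": 1}  # assumed label-to-index mapping

# Persist architecture name, weights, and label mapping
model_lib.save_checkpoint(model, checkpoint_path, output_categories=2)

# Rebuild the same model later (load_checkpoint returns 0 on failure)
restored = model_lib.load_checkpoint(checkpoint_path, device="cpu")
if restored != 0:
    restored.eval()  # ready for inference
```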
#### File: computer_vision_perception/image_classifier/train.py
```python
import torch
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
from collections import OrderedDict
from torch.optim import lr_scheduler
import argparse
import time
import copy
import model_lib
class Train:
def __init__(self, data_dir, output_categories, means, stds, model_name):
'''
Initialize deep learning model to be trained
Args:
data_dir(str): directory containing:
1. folder 'train' with training data
2. folder 'valid' with validation data used during training
3. folder 'test' with testing data used after training
4. file 'cat_to_name.json' with mapping between label ids and label names
output_categories(int): number of output labels that our model shall identify
means(array): means for the dataset e.g. [0.485, 0.456, 0.406]
stds(array): stds for the dataset e.g. [0.229, 0.224, 0.225]
model_name(str): Choose the model to be trained ("vgg16" or "vgg13")
Returns:
None
'''
# Define folders
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Define number of possible categories that our network must recognize in your data
self.labels_output_categories = output_categories
# File with maping between label ids and their names
self.label_map_file = data_dir + "/cat_to_name.json"
# Define means and stds for images
self.means = means
self.stds = stds
# Define transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([
# data augmentation
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224),
# data normalization
transforms.ToTensor(),
transforms.Normalize(means,stds)
])
testing_transforms = transforms.Compose([
# data normalization
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(means,stds)
])
validation_transforms = transforms.Compose([
# data normalization
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(means,stds)
])
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=testing_transforms)
validation_data = datasets.ImageFolder(valid_dir, transform=validation_transforms)
# Define the dataloaders using the image datasets and image transformations
self.dataloaders = {'train': torch.utils.data.DataLoader(train_data, batch_size=16, num_workers=0, shuffle=True),
'test': torch.utils.data.DataLoader(test_data, batch_size=16, num_workers=0),
'validation': torch.utils.data.DataLoader(validation_data, batch_size=16, num_workers=0)
}
self.dataset_sizes = {'train': len(self.dataloaders['train']),
'test': len(self.dataloaders['test']),
'validation': len(self.dataloaders['validation'])}
self.model = self._load_model(model_name)
# Turn off gradients for our model
for param in self.model.parameters():
param.requires_grad = False
#feed-forward network
self.model.classifier = model_lib.load_classifier(self.model, self.labels_output_categories)
# Load naming of output categories
self.model.class_to_idx = train_data.class_to_idx
def _load_model(self, model_name):
'''
Initialize pretrained deep learning model (vgg13 or vgg16)
Args:
mode_name(str): 'vgg16' or 'vgg13'
Returns:
model: pretrained deep learning model (vgg13 or vgg16)
'''
if (model_name == "vgg16"):
return models.vgg16(pretrained=True)
elif model_name == "vgg13":
return models.vgg13(pretrained=True)
else:
raise "Error _load_model: Only vgg16 and vgg13 models are supported"
def train_model(self, learning_rate, epochs, device, save_checkpoint_path):
'''
Train the deep learning model
Args:
learning_rate(float): learning rate for the optimizer e.g. 0.003
epochs(int): number of training epochs that will be executed
device(str): 'cuda' for using gpu, 'cpu' to run without gpu (not recommended, slow)
Returns:
None
'''
print ("train_model start. device: \'{0}\' | learnin_rate: {1} | epochs: {2}"\
.format(device, learning_rate, epochs))
start = time.process_time()
criterion = nn.NLLLoss()
optimizer = optim.SGD(self.model.classifier.parameters(), lr = learning_rate)
self.model.train()
self.model.to(device)
running_loss = 0
print_every = 100 # Define how often the intermediate training status will be printed \
# - and the model tested against the validation set
for epoch in range(epochs):
steps = 0
for images, labels in self.dataloaders["train"]:
steps += 1
self.model.train()
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
logps = self.model.forward(images)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
# model intermediate evaluation during the training
self.model.eval()
# deactivate autograd engine during validation.
# - it will reduce memory usage
# - it will speed up computations for model validation
# - it will disable backpropagation we don't want to have during model validation anyway
with torch.no_grad():
for images, labels in self.dataloaders["validation"]:
images, labels = images.to(device), labels.to(device)
logps = self.model.forward(images)
loss = criterion(logps, labels)
test_loss += loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_ps, top_class = ps.topk(1, dim=1)
equality = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equality.type(torch.FloatTensor))
print("Epoch {0}/{1}".format(epoch+1, epochs))
print("Steps {0}/{1}".format(steps, self.dataset_sizes["train"]))
print("Train loss {:.2}".format(running_loss/print_every))
print("Validation loss {:.2}".format(test_loss/self.dataset_sizes["validation"]))
print("Validation accuracy {:.2}\n".format(accuracy/self.dataset_sizes["validation"]))
running_loss = 0
epoch_end = time.process_time()
print("Training runtime so far {0}".format(epoch_end - start))
end = time.process_time()
print("Training finished. Run time: {0}".format(end - start))
model_lib.save_checkpoint(self.model, save_checkpoint_path, self.labels_output_categories)
print ("Checkpoint saved into file \'{0}\'".format(save_checkpoint_path))
def test_model(self, device):
self.model.to(device)
self.model.eval()
criterion = nn.NLLLoss()
test_loss = 0
accuracy = 0
steps = 0
print_every = 100 # Define how often the intermediate testing status will be printed
# deactivate autograd engine during validation.
# - it will reduce memory usage
# - it will speed up computations for model validation
# - it will disable backpropagation we don't want to have during model validation anyway
with torch.no_grad(): # Turn off gradients for validation, saves memory and computations
for images, labels in self.dataloaders["test"]:
steps += 1
if steps % print_every == 0:
print("Steps {0}/{1}".format(steps, self.dataset_sizes["train"]))
images, labels = images.to(device), labels.to(device)
logps = self.model.forward(images)
loss = criterion(logps, labels)
test_loss += loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_ps, top_class = ps.topk(1, dim=1)
equality = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equality.type(torch.FloatTensor)).item()
print("Validation loss {:.2}".format(test_loss/self.dataset_sizes["test"]))
print("Validation accuracy {:.2%}".format(accuracy/self.dataset_sizes["test"]))
# ============= main ==============
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", required=True, help="path to an folder containing data folders \ (training, validation, testing) and file cat_to_name.json containing translation between output categories and their names")
parser.add_argument("--output_cat", required=True, type=int, help="specify number of output categories for the model")
parser.add_argument("--means", required=False, default=[0.485, 0.456, 0.406], help="image dataset means (provided with dataset)'")
parser.add_argument("--stds", required=False, default=[0.229, 0.224, 0.225], help="image dataset stds (provided with dataset)'")
parser.add_argument("--arch", required=False, default="vgg16", choices=["vgg16", "vgg13"], help="Select model for transfer learning. Default is vgg16")
parser.add_argument("--save_path", required=False, default="assets/model/checkpoint_script.pth", help="location where to save a model checkpoint. Default is 'assets/model/checkpoint_script.pth'")
parser.add_argument("--learning_rate", required=False, type=float, default=0.03, help="specify learning rate for the model. Default is 0.03")
parser.add_argument("--epochs", required=False, type=int, default=7, help="specify number of epochs for training. Default is 7")
parser.add_argument("--device", required=False, default="cpu",choices=["cpu", "gpu"], help="use GPU during the computation. Default is CPU")
args = parser.parse_args()
data_directory = args.data_dir
output_categories = args.output_cat
means = args.means
stds = args.stds
save_path = args.save_path
arch = args.arch
learning_rate = args.learning_rate
epoch = args.epochs
device = args.device
train_cl = Train(data_directory, output_categories, means, stds, arch)
train_cl.train_model(learning_rate, epoch, device, save_path)
train_cl.test_model(device)
``` |
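A hypothetical programmatic equivalent of the argparse-driven block above. Note that the argument parsing sits at module level, so importing train.py as-is would trigger it; this sketch assumes the class is factored into its own module or the script section is guarded by `if __name__ == '__main__':`. All paths and counts below are placeholders.
```python
# Hypothetical programmatic use of the Train class (paths and counts are placeholders).
# Assumes Train is importable without running the module-level argparse code.
from train import Train

trainer = Train(data_dir="assets/data",
                output_categories=102,
                means=[0.485, 0.456, 0.406],
                stds=[0.229, 0.224, 0.225],
                model_name="vgg16")

trainer.train_model(learning_rate=0.003, epochs=7, device="cuda",
                    save_checkpoint_path="assets/model/checkpoint_example.pth")
trainer.test_model("cuda")
```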
{
"source": "JMarcan/ml_and_natural_language_processing",
"score": 3
} |
#### File: classification_of_messages/data/process_data.py
```python
import sys
import pandas as pd
from sqlalchemy import create_engine
DEBUG = True
def debug_message(message):
'''
Print debug messages (if activated)
Args:
message: the message to be printed
Returns:
None
'''
if DEBUG == True:
print("Debug: {0}".format(message))
def extract_column_names(row):
'''
The function retrieves column names from the provided row.
A column name is the substring before the first occurrence of '-'
Args:
row
Returns:
column_names
'''
column_names = []
for c in row:
s = c.split('-')
column_names.append(s[0])
return column_names
def load_data(messages_path, categories_path):
'''
The function loads data
Args:
messages_path: path to the file containing messages
categories_path: path to the file containing categories
Returns:
df: pandas dataframe containing merged datasets
'''
debug_message("run_ETL_pipeline entry (messages_path: {} | categories_path: {})".format(messages_path, categories_path))
# Load datasets
messages = pd.read_csv(messages_path)
categories = pd.read_csv(categories_path)
# Merge datasets
df = pd.merge(messages, categories, how="left", on="id")
debug_message("load_data exit")
return df
def clean_data(df):
'''
The function cleans the data so it can later be used for machine learning
Args:
df: pandas dataframe containing merged dataset
Returns:
df: cleaned pandas dataframe prepared to be used in machine learning
'''
# Split categories into separate category columns.
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(";", expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# extract a list of new column names for categories.
#TODO: Can be optimized by executing vectorized operation instead of one by one... Crude looping in Pandas, or That Thing You Should Never Ever Do
category_colnames = extract_column_names(row)
# rename the columns of `categories`
categories.columns = category_colnames
# Convert category values to just numbers 0 or 1
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str.split('-').str[1]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# Replace categories column in df with new category columns
# drop the original categories column from `df`
df = df.drop(columns = ["categories"])
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df, categories], axis=1)
# Remove duplicates.
# check number of duplicates
duplicated = df[df.duplicated(subset = 'message')]
debug_message("Number of duplicates before removing them: {}".format(duplicated.shape[0]))
# drop duplicates
df = df.drop_duplicates(subset = 'message')
# check number of duplicates
duplicated = df[df.duplicated(subset = 'message')]
debug_message("Number of duplicates after removing them: {}".format(duplicated.shape[0]))
debug_message("clean_data exit")
return df
def run_ETL_pipeline(messages_path, categories_path, db_path):
'''
The function orchestrates the ETL pipeline run
Args:
messages_path: path to the file containing messages
categories_path: path to the file containing categories
db_path: path where to save the result
Returns:
None
'''
debug_message("run_ETL_pipeline entry (messages_path: {} | categories_path: {})".format(messages_path, categories_path))
# 1. Load datasets
df = load_data(messages_path, categories_path)
# 2. clean data
df = clean_data(df)
# 3. stores cleaned data to database
save_cleaned_data(df, db_path)
debug_message("run_ETL_pipeline exit")
def save_cleaned_data(df, db_path):
'''
Saves cleaned data into the database
Args:
df: the cleaned dataframe to be saved
db_path: the location where the database will be saved
Returns:
None
'''
debug_message("save model enter")
# Export model as a pickle file
sql_path = 'sqlite:///{}'.format(db_path)
engine = create_engine(sql_path)
df.to_sql('DisasterResponse', engine, index=False, if_exists='replace')
debug_message("save model exit")
def main():
'''
Main function orchestrating the execution
'''
if len(sys.argv) == 4:
messages_path, categories_path, db_path = sys.argv[1:]
run_ETL_pipeline(messages_path, categories_path, db_path) # run ETL pipeline
else:
print('Please provide: \n'\
'-the filepath of the disaster messages file as the first argument \n'\
'-the filepath of the disaster categories file as the second argument \n'\
'-the name of file where you want to save cleaned dataset \n'\
'\nExample: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db')
if __name__ == '__main__':
main()
``` |
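As a follow-up sketch, this is how the table written by save_cleaned_data could be read back for the downstream machine-learning step; the database filename mirrors the example in the usage message, and the dropped columns are assumptions about the raw CSV schema.
```python
# Hypothetical follow-up: load the cleaned DisasterResponse table back into pandas.
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///DisasterResponse.db')  # filename from the usage example
df = pd.read_sql_table('DisasterResponse', engine)

X = df['message']  # input text for the classifier
# Assumed non-label columns; errors='ignore' keeps this robust if a column is absent
y = df.drop(columns=['id', 'message', 'original', 'genre'], errors='ignore')
print(X.shape, y.shape)
```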
{
"source": "JMarcan/robotics_and_algorithms",
"score": 4
} |
#### File: robot_motion_planning/code/maze.py
```python
import numpy as np
class Maze(object):
def __init__(self, filename):
'''
Maze objects have two main attributes:
- dim: mazes should be square, with sides of even length. (integer)
- walls: passages are coded as a 4-bit number, with a bit value taking
0 if there is a wall and 1 if there is no wall. The 1s register
corresponds with a square's top edge, 2s register the right edge,
4s register the bottom edge, and 8s register the left edge. (numpy
array)
The initialization function also performs some consistency checks for
wall positioning.
'''
with open(filename, 'r') as f_in:
# First line should be an integer with the maze dimensions
self.dim = int(f_in.readline())
# Subsequent lines describe the permissibility of walls
walls = []
for line in f_in:
line_walls = line.split(',')
# transform string into ints inside of the list
length = len(line_walls)
for i in range(length):
line_walls[i] = int(line_walls[i])
walls.append(line_walls)
self.walls = np.array(walls)
# Perform validation on maze
# Maze dimensions
if self.dim % 2:
raise Exception('Maze dimensions must be even in length!')
if self.walls.shape != (self.dim, self.dim):
raise Exception('Maze shape does not match dimension attribute!')
# Wall permeability
wall_errors = []
# vertical walls
for x in range(self.dim-1):
for y in range(self.dim):
if (self.walls[x,y] & 2 != 0) != (self.walls[x+1,y] & 8 != 0):
wall_errors.append([(x,y), 'v'])
# horizontal walls
for y in range(self.dim-1):
for x in range(self.dim):
if (self.walls[x,y] & 1 != 0) != (self.walls[x,y+1] & 4 != 0):
wall_errors.append([(x,y), 'h'])
if wall_errors:
for cell, wall_type in wall_errors:
if wall_type == 'v':
cell2 = (cell[0]+1, cell[1])
print ('Inconsistent vertical wall between {} and {}'.format(cell, cell2))
else:
cell2 = (cell[0], cell[1]+1)
print ('Inconsistent horizontal wall between {} and {}'.format(cell, cell2))
raise Exception('Consistency errors found in wall specifications!')
def is_permissible(self, cell, direction):
"""
Returns a boolean designating whether or not a cell is passable in the
given direction. Cell is input as a list. Directions may be
input as single letter 'u', 'r', 'd', 'l', or complete words 'up',
'right', 'down', 'left'.
"""
dir_int = {'u': 1, 'r': 2, 'd': 4, 'l': 8,
'up': 1, 'right': 2, 'down': 4, 'left': 8}
try:
return (self.walls[tuple(cell)] & dir_int[direction] != 0)
except:
print ('Invalid direction provided!')
def dist_to_wall(self, cell, direction):
"""
Returns a number designating the number of open cells to the nearest
wall in the indicated direction. Cell is input as a list. Directions
may be input as a single letter 'u', 'r', 'd', 'l', or complete words
'up', 'right', 'down', 'left'.
"""
dir_move = {'u': [0, 1], 'r': [1, 0], 'd': [0, -1], 'l': [-1, 0],
'up': [0, 1], 'right': [1, 0], 'down': [0, -1], 'left': [-1, 0]}
sensing = True
distance = 0
curr_cell = list(cell) # make copy to preserve original
while sensing:
if self.is_permissible(curr_cell, direction):
distance += 1
curr_cell[0] += dir_move[direction][0]
curr_cell[1] += dir_move[direction][1]
else:
sensing = False
return distance
```
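A short, hypothetical driver for the Maze class above; the maze file name is a placeholder and must point to a spec in the format the constructor expects (dimension on the first line, comma-separated wall codes below).
```python
# Hypothetical usage of Maze (the file name is a placeholder).
from maze import Maze

maze = Maze('test_maze_01.txt')
print('dimensions:', maze.dim)

# Can the robot leave the start cell upwards, and how many open cells lie that way?
print('open upwards from (0, 0):', maze.is_permissible([0, 0], 'up'))
print('cells before wall upwards:', maze.dist_to_wall([0, 0], 'up'))
```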
#### File: robot_motion_planning/code/robot.py
```python
import numpy as np
import json
import random
from sys import stderr
class Robot(object):
def __init__(self, maze_dim):
"""
set up attributes that the robot
will use to learn and navigate the maze. Some initial attributes are
provided based on common information, including the size of the maze
the robot is placed in.
Args:
maze_dim: int providing maze dimensions
(e.g. 12 means that maze has dimensions 12x12)
Returns:
None
"""
self.maze_dim = maze_dim
self.maze_area = maze_dim ** 2.
# robot location tracking variables
self.location_orig = [0, 0]
self.location = [0, 0]
self.location_last = [0, 0]
self.heading = 'up'
# variables to create robot's internal map of the maze
self.maze_grid = np.zeros((maze_dim, maze_dim), dtype=int) # Grid for wall locations for each maze.
self.path_grid = np.zeros((maze_dim, maze_dim), dtype=int)
self.visited_grid = np.zeros((maze_dim, maze_dim), dtype=int) # visited paths used for Trémaux' algorithm
self.visited_grid_previous_heading = np.zeros((maze_dim, maze_dim), dtype=object) # previous headings used for Trémaux' backtracking
# measuring number of steps in which the maze was solved
self.step_count = 0
# Maximum allowed movement units per turn
self.max_movement = 3
self.backtracking = False
self.is_reversing = False #to indicate that 180 degrees turn must be completed (done by two right turns)
# Robot's operational mode
# This decides robot's action when next_move() is called.
self.mode = "explore"
# Flag that indicates the first step of exploration
self.is_beginning = True
#possible path grid values
self.UNVISITED = 0
self.VISITED = 1
self.DOUBLE_VISITED = 2
self.SHORTEST = 3 # marking shortest path, so it can be visualized
# Numbers assigned to open walls in cells.
self.wall_values = {'up': 1,
'right': 2,
'down': 4,
'left': 8}
# Internal robot's maze cell map
# Each number represents a four-bit number that has a bit value of 0 if an edge is closed (walled) and
# 1 if an edge is open (no wall); the 1s register corresponds with the upwards-facing side, the 2s register
# the right side, the 4s register the bottom side, and the 8s register the left side. For example,
# the number 10 means that a square is open on the left and right,
# with walls on top and bottom (0*1 + 1*2 + 0*4 + 1*8 = 10).
# The index origin (0, 0) is at the bottom left
self.maze_map = [[0 for _ in range(maze_dim)] for _ in range(maze_dim)]
# Corresponding new headings after rotating
self.dict_rotation = {'up': ['left', 'right'],
'right': ['up', 'down'],
'down': ['right', 'left'],
'left': ['down', 'up']}
# Opposite directions
self.opposite = {'up': 'down',
'right': 'left',
'down': 'up',
'left': 'right'}
# Vectors for different directions
self.direction_to_vec = {'up': [0, 1],
'right': [1, 0],
'down': [0, -1],
'left': [-1, 0]}
# Rotation matrices
self.rot_matrices = {'left': np.array([(0, 1), (-1, 0)]),
'up': np.array([(1, 0), (0, 1)]),
'right': np.array([(0, -1), (1, 0)])}
# Dictionary for backtracking, translates robot's headings into direction relative to the maze
self.direction_to_rotation = {
heading: {directions[0]: -90, directions[1]: 90}
for heading, directions in self.dict_rotation.items()}
# Policy grid which will be created after performing a search algorithm.
self.policy_grid = [['' for _ in range(self.maze_dim)] for _ in
range(self.maze_dim)]
# Text file in which the travelled path will be logged.
self.log_filename = 'robot_path.json'
# create file logging visited path and write head line
with open(self.log_filename, 'w+') as file:
file.write('[step_count, robot_x, robot_y, visited, heading]\n')
# decides whether debug message will be displayed
self.DEBUG = False
def print_debug(self, debug_message):
"""Prints debug message if Debug mode is set to True
Args:
debug_message: string to be printed
Returns:
None
Examples:
>>> print_debug("move robot to the right")
"""
if self.DEBUG == True:
print("[ Debug message ]: {0}".format(debug_message))
def wall_follower(self, sensors):
"""Wall follower algorithm deciding on the next step
The wall follower algorithm works only for simply connected maze types.
Left-hand rule is used.
Args:
sensors: list of three int values indicating number of open squares
in front of the left, center, and right sensors (in that order)
Returns:
rotation, movement
- rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
- movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
Examples:
>>> sensors=[0, 10, 0]
>>> rotation, movement = self.wall_follower(sensors)
"""
movement = 0
rotation = 0
# 1. If you can turn left, do it
if sensors[0] > 0:
movement = 1
rotation = -90
self.print_debug("move left")
# 2. Else (If you can't turn left), if you can continue going straight,
# do it
elif sensors[1] > 0:
movement = 1
rotation = 0
self.print_debug("move 1 forward")
# 3. Else (If you can't do either of the previous steps),
# if you can turn right,do it
elif sensors[2] > 0:
movement = 1
rotation = 90
self.print_debug("move right")
# 4. If you reached a dead end, turn back 180 degrees
# (done in two steps by turning right)
else:
movement = 0
rotation = 90
self.print_debug("dead end, turn to the right, no movement")
return rotation, movement
def update_map(self, possible_directions):
"""Update the robot's internal map using the unblocked (open)
directions detected by the current sensor readings.
Args:
possible_directions: list of possible directions
can contain these values: 'left', 'right', 'up'
Returns:
None
Examples:
>>> possible_directions=['left', 'up']
>>> self.update_map(possible_directions)
"""
# Get the unit vector which points in the direction of the robot's heading
movement_vec = np.array(self.direction_to_vec[self.heading])
# First, translate the detected openings into global directions
for direction in possible_directions:
global_dir = None
if direction == 'left':
global_dir = self.dict_rotation[self.heading][0]
elif direction == 'right':
global_dir = self.dict_rotation[self.heading][1]
elif direction == 'up':
global_dir = self.heading
# Get the corresponding wall value for a wall opening in the given direction
wall_value = self.wall_values[global_dir]
# Update the current map cell with the new wall value
self.maze_map[self.location[0]][self.location[1]] |= wall_value
# Rotate robot's direction vector to given direction
dir_vec = np.dot(movement_vec, self.rot_matrices[direction])
# Get the wall opening value for the next cell
wall_value = self.wall_values[self.opposite[global_dir]]
# Update the next map cell with the opening that can be seen from this cell.
# This keeps wall information consistent between neighbouring cells.
self.maze_map[self.location[0] + dir_vec[0]][
self.location[1] + dir_vec[1]] |= wall_value
def next_move(self, sensors):
"""
This function determines the next move the robot should make,
based on the input from the sensors after its previous move.
Args:
sensors: inputs are a list of three distances from the robot's left,
front, and right-facing sensors, in that order
Returns:
rotation: indicates desired robot rotation (if any) as a number:
0 for no rotation, +90 for a 90-degree rotation clockwise,
and -90 for a 90-degree rotation counterclockwise.
Other values will result in no rotation.
movement: indicates robot movement, and the robot will attempt
to move the number of indicated squares: a positive number
indicates forwards movement, while a negative number indicates
backwards movement. The robot may move a
maximum of three units per turn. Any excess movement is ignored.
If the robot wants to end a run (e.g. during the first training run in
the maze) then returing the tuple ('Reset', 'Reset') will indicate to
the tester to end the run and return the robot to the start.
"""
rotation = 0
movement = 0
# measure number of steps to solve maze
self.step_count +=1
self.print_debug("=== {0}.step ===".format(self.step_count))
if self.mode == "explore":
# explore and map the complete maze
rotation, movement = self.explore(sensors)
if rotation == "Reset":
# leave exploration mode
return rotation, movement
self.log_location() # store location before its movement
# print location and explore maze percentage
x = self.location[0]
y = self.location[1]
self.path_grid[x][y] = 1
print("Robot has explored {:04.2f}% of the maze.\n".format(self.explored_percentage()))
elif self.mode == "search":
self.find_shortest_path()
self.start_racing()
elif self.mode == "race":
# Race to the goal room on the shortest path through the maze.
# The robot still moves iteratively on every call of next_move().
rotation, movement = self.race_to_goal()
# Perform rotations and movements determined by the racing function
# This marks the racing path and logs it to the logfile
self.mark_path(self.SHORTEST)
self.log_location()
if self.movement_allowed(sensors, rotation, movement) == False:
# check that intended movement is possible
print("ERROR: Robot cannot move in a chosen direction. Stopping the robot. [Heading: {0} | Rotation: {1} | Movement: {2} | Location: {3}]".format(self.heading, rotation, movement, self.location), file=stderr)
rotation = 0
movement = 0
else:
self.update_heading(rotation, movement)
print("Location new: {0} | location_last: {1}".format(self.location, self.location_last))
return rotation, movement
def start_racing(self):
"""Start the robot's racing mode
Args:
None
Returns:
None
"""
self.mode = "race"
self.step_count = 0
self.mark_path(self.SHORTEST)
self.log_location()
def race_to_goal(self):
"""Move robot on the shortest path to the goal
Args:
None
Returns:
rotation, movement
- rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
- movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
"""
rotation = 0
movement = 0
# First, collect up to three actions in a line if they are the same
actions = []
x, y = self.location[0], self.location[1]
abort = False
while len(actions) < self.max_movement and not abort:
current_action = self.policy_grid[x][y]
if not current_action:
abort = True
else:
actions.append(current_action)
dx, dy = self.direction_to_vec[current_action]
nx, ny = x + dx, y + dy
# Check if the next cell (nx, ny) has the same action.
if (0 <= nx < self.maze_dim and
0 <= ny < self.maze_dim and
self.policy_grid[nx][ny] == current_action):
x = nx
y = ny
else:
abort = True
# Secondly, set rotation and movement according to the collected actions
rotation = self.direction_to_rotation[self.heading].get(
actions[0], 0)
movement = len(actions)
return rotation, movement
def movement_allowed(self, sensors, rotation, movement):
"""Check if desired movement is allowed or blocked by wall
Args:
sensors: three values input from the robot's sensors
rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
Returns:
True if movement is allowed. False if not.
Examples:
>>> rotation = 0
>>> movement = 1
>>> is_movement_allowed = self.movement_allowed(sensors, rotation, movement)
"""
if rotation == -90:
return sensors[0] >= movement
elif rotation == 90:
return sensors[2] >= movement
elif rotation == 0:
return sensors[1] >= movement
else:
return False
def explore(self, sensors):
"""Explore a maze using Trémaux' algorithm
Args:
sensors: list of three int values indicating number of open squares
in front of the left, center, and right sensors (in that order)
Returns:
rotation, movement
- rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
- movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
Examples:
>>> sensors=[0, 10, 0]
>>> rotation, movement = self.explore(sensors)
"""
rotation = 0
movement = 0
if self.is_beginning:
# This prevents the robot from immediately cancelling exploration
self.is_beginning = False
elif self.finished_exploration():
# When back at the start, end the exploration
rotation, movement = self.end_exploration()
return rotation, movement
# When in reversing mode, just finish the rotation and move forward
if self.is_reversing:
rotation = 90
movement = 1
self.is_reversing = False
self.print_debug("explore: Reversing. Decided rotation: {0} | Movement: {1} | Visited grid: {2} | Backtracking: {3}".format(rotation, movement, self.visited_grid[self.location[0], self.location[1]], self.backtracking))
return rotation, movement
# Translate sensor readings into unblocked directions
open_directions = self.check_open_directions(sensors)
# Update the internal mapping of the maze
self.update_map(open_directions)
# --------------------------------------
# Trémaux' algorithm
# --------------------------------------
x = self.location[0]
y = self.location[1]
if len(open_directions) == 0:
# Robot is at a deadend
# Start backtracking
rotation, movement = self.start_backtracking()
self.mark_path(self.DOUBLE_VISITED)
self.print_debug("explore: 0 - START backtracking. No open directions. ")
elif len(open_directions) == 1:
# Robot is on a path to the next junction
rotation, movement = self.follow_path(self.random_path_choice(open_directions[0]))
if self.backtracking == True:
self.mark_path(self.DOUBLE_VISITED)
else:
self.mark_path(self.VISITED)
elif len(open_directions) > 1:
if self.path_is(self.UNVISITED):
# Store the direction to the path which has led to this junction, used for backtracking.
self.visited_grid_previous_heading[x][y] = self.opposite[self.heading]
unvisited_paths = self.get_paths(open_directions, self.UNVISITED)
if len(unvisited_paths) > 0:
# Still unvisited paths from this junction
rotation, movement = self.follow_path(self.random_path_choice(unvisited_paths))
# Mark this junctions for the first time
self.mark_path(self.VISITED)
if self.backtracking == True:
self.backtracking = False
else:
# no more unvisited paths from this junction, treat it as a dead-end
rotation, movement = self.start_backtracking()
self.mark_path(self.DOUBLE_VISITED)
elif self.path_is(self.VISITED):
# Robot has already visited this junction
if self.backtracking == True:
# robot stepped into already visited junction while backtracking
unvisited_paths = self.get_paths(open_directions, self.UNVISITED)
if len(unvisited_paths) > 0:
# Still unvisited paths from this junction
rotation, movement = self.follow_path(self.random_path_choice(unvisited_paths))
if self.backtracking == True:
self.backtracking = False
else:
# no more unvisited paths from this junction, treat it as a dead-end
if self.location == self.location_last:
# prevent getting stuck in a backtracking loop in case it goes through multiple junctions
# robot stays in the same position, so choose a direction already visited once
visited_paths = self.get_paths(open_directions, self.VISITED)
if len(visited_paths) > 0:
rotation, movement = self.follow_path(self.random_path_choice(visited_paths))
self.print_debug("explore: 2: Warrning: Backtracking stuck robot in the same position. Choosing random path VISITED_ONCE. ")
self.mark_path(self.DOUBLE_VISITED)
else:
self.print_debug("explore: 3: Error: Backtracking stuck robot in the same position. No path visited less than TWICE left")
else:
rotation, movement = self.continue_backtracking()
self.mark_path(self.DOUBLE_VISITED)
else:
# robot stepped into already visited junction while NOT backtracking
# treat it as a dead-end
rotation, movement = self.start_backtracking()
self.print_debug("explore: Decided rotation: {0} | Movement: {1} | Visited grid: {2} | Backtracking: {3}".format(rotation, movement, self.visited_grid[self.location[0], self.location[1]], self.backtracking))
return rotation, movement
def find_shortest_path(self):
"""Find the shortest path to the using breadth-first search
and plan robot next actions based on this
Args:
None
Returns:
None
"""
init = [self.location_orig[0], self.location_orig[1]]
# The center cells which make up the goal room.
goal_room = [[self.maze_dim / 2, self.maze_dim / 2],
[self.maze_dim / 2 - 1, self.maze_dim / 2],
[self.maze_dim / 2, self.maze_dim / 2 - 1],
[self.maze_dim / 2 - 1, self.maze_dim / 2 - 1]]
# This could be used to change the movement costs.
cost = 1
# Connects movement delta vectors and
# their corresponding directional actions.
delta_to_action = {(0, 1): 'up',
(1, 0): 'right',
(0, -1): 'down',
(-1, 0): 'left'}
# This grid holds the action delta at every position of the maze.
delta_grid = [[(0, 0) for _ in range(self.maze_dim)] for _ in
range(self.maze_dim)]
# Initialize some values and lists for the search algorithm
g = 0
open_cells = [[g, init[0], init[1]]]
visited = [init]
end = []
# Search through the maze with Dijkstra.
while True:
if not open_cells:
break
open_cells.sort()
# Get the cell from the open list with the lowest cost-value (G-Value).
g, x, y = open_cells.pop(0)
if [x, y] in goal_room:
# Stop when entering the goal room.
end = [x, y]
break
# Check the current position in the maze map for wall openings.
# For every wall opening, the corresponding directional delta vector is added
# to the deltas list. This essentially creates a list of deltas to cells connected to
# the current cell in the map.
deltas = []
for direction, value in self.wall_values.items():
if self.maze_map[x][y] & value != 0:
deltas.append(self.direction_to_vec[direction])
# Now, loop through all the connected cells
for dx, dy in deltas:
# Use delta to calculate the coords of the next cell (nx, ny)
nx, ny = x + dx, y + dy
if [nx, ny] not in visited:
# The next cell is not yet visited
open_cells.append([g + cost, nx, ny])
visited.append([nx, ny])
# Save the action delta vector needed to get to this next cell (nx, ny)
delta_grid[nx][ny] = (dx, dy)
# Create policy path by travelling from end to start
x, y = end
self.policy_grid[x][y] = '*'
while [x, y] != init:
# Apply the previously saved action deltas backwards.
nx = x - delta_grid[x][y][0]
ny = y - delta_grid[x][y][1]
# Save the action string to the policy grid.
self.policy_grid[nx][ny] = delta_to_action[delta_grid[x][y]]
# Continue with the next position
x, y = nx, ny
def random_path_choice(self, paths):
"""
Make a deterministic path choice, so that robot movement in a known maze is predictable,
which simplifies analysis
Args:
paths as tuple
with possible values "up" and "right" and "left"
Returns:
path as string
with possible values: "up" or "right" or "left"
"""
if "up" in paths:
return "up"
elif "right" in paths:
return "right"
elif "left" in paths:
return "left"
else:
print("The unkown path in " + str(paths), file=stderr)
return "error"
def continue_backtracking(self):
"""
Continue backtracking. Follow the way the robot came from
Args:
None
Returns:
rotation, movement
- rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
- movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
"""
movement = 1
rotation = 0
# Get direction to which we wish to backtrack to
direction = self.visited_grid_previous_heading[self.location[0]][self.location[1]]
# Translate that direction into a possibly needed rotation of the robot,
# considering the current heading
rotation = self.direction_to_rotation[self.heading].get(direction,0)
self.print_debug("Continue backtracking | direction: {0} | rotation: {1}".format(direction, rotation))
return rotation, movement
def get_paths(self, open_directions, value):
"""
Return paths possible to take from the junction with a given value
Args:
open_directions: list of possible directions from the junction
value: value of paths to be filtered for
- self.UNVISITED or self.VISITED
Returns:
direction_with_value: list of possible directions from the junction
with a given value
"""
direction_with_value = []
for direction in open_directions:
next_step_location = self.get_next_step_coordinates(direction, 1)
if self.visited_grid[next_step_location[0]][next_step_location[1]] == value:
direction_with_value.append(direction)
return direction_with_value
def path_is(self, value, x=None, y=None):
"""
Returns true if the path at the given position has the specified value.
If no position parameters are given, checks at the robot's current position
Args:
value: value against which the path is tested
- self.UNVISITED or self.VISITED or self.DOUBLE_VISITED
Returns:
True if path has given value. False otherwise
"""
if x is None:
x = self.location[0]
if y is None:
y = self.location[1]
return self.visited_grid[x][y] == value
def follow_path(self, direction):
"""Follow path in the given direction.
Args:
direction: direction in which shall the robot move
- "left" or "up" or "right"
Returns:
rotation, movement
- rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
- movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
"""
rotation = 0
movement = 0
if direction == "left":
rotation = -90
movement = 1
elif direction == "up":
rotation = 0
movement = 1
elif direction == "right":
rotation = 90
movement = 1
else:
print(
"Can't follow path, chosen direction " + direction + "is invalid.",
file=stderr)
return rotation, movement
def mark_path(self, new_value=None):
"""
Mark a traveled path to a specified value.
Args:
new_value: value for the path
- self.UNVISITED or self.VISITED or self.DOUBLE_VISITED or self.SHORTEST
Returns:
None
"""
x = self.location[0]
y = self.location[1]
if new_value is None:
if self.visited_grid[x][y] == self.UNVISITED:
self.visited_grid[x][y] = self.VISITED
else:
self.visited_grid[x][y] = self.DOUBLE_VISITED
else:
self.visited_grid[x][y] = new_value
def check_open_directions(self, sensors):
"""Check which directions are not blocked return them.
Args:
sensors: list of three int values indicating number of open squares
in front of the left, center, and right sensors (in that order)
Returns:
open_directions
- list of directions with possible values "left" and "up" and "right"
"""
open_directions = []
if sensors[0] > 0:
open_directions.append('left')
if sensors[1] > 0:
open_directions.append('up')
if sensors[2] > 0:
open_directions.append('right')
return open_directions
def start_backtracking(self):
"""Start reversing the robot to perform a 180 degree rotation.
Args:
None
Returns:
rotation, movement: a turn in place (rotation=90, movement=0) that begins the 180 degree reverse
"""
self.is_reversing = True
self.backtracking = True
movement = 0
rotation = 90
return rotation, movement
def finished_exploration(self):
"""Returns true when the robot is back at the origin.
Args:
None
Returns:
True if the robot is back at the origin, False otherwise
"""
return self.location == self.location_orig
def end_exploration(self):
"""Stop the robot's exploration mode and reset the run.
Args:
None
Returns:
rotation, movement: the ('Reset', 'Reset') signal that ends the run
"""
print("Robot has reached the origin again. Finishing exploration.")
# Reset some localization-specific values
self.heading = "up"
self.location = self.location_orig
self.mode = "search"
# Set the reset signals
movement = "Reset"
rotation = "Reset"
return rotation, movement
def explored_percentage(self):
"""Calculate the percentage of the maze the robot has visited
Args:
None
Returns:
explored_perct: float with percentage of the maze the robot has visited
Examples:
>>> explored_perct = self.explored_percentage()
"""
explored = 0
for x in range(self.maze_dim):
for y in range(self.maze_dim):
if self.path_grid[x][y] > 0:
explored += 1
explored_perct = (explored/self.maze_area) * 100
return explored_perct
def log_location(self):
"""Append current robot movement in a log file.
Args:
None
Returns:
None
"""
# Data format: [step_count, Pos-X, Pos-Y, CellValue, Heading]
x = self.location[0]
y = self.location[1]
data = [self.step_count, x, y, int(self.visited_grid[x][y]), self.heading]
with open(self.log_filename, 'a') as file:
json.dump(data, file)
file.write('\n')
def update_heading(self, rotation, movement):
"""Updates the direction of the robot and its location in the maze
according to the last move
Args:
rotation: integer indicating the robot’s rotation on that timestep.
taking one of three values: -90, 90, or 0
for counterclockwise, clockwise, or no rotation (in that order)
movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
Returns:
None
Examples:
>>> rotation = 0
>>> movement = 1
>>> self.update_heading(rotation, movement)
"""
self.location_last = self.location
x = self.location[0]
y = self.location[1]
# update heading
if rotation != 0: #change in direction
if rotation == 90: # turn right
if self.heading == "up":
self.heading = "right"
elif self.heading == "right":
self.heading = "down"
elif self.heading == "down":
self.heading = "left"
elif self.heading == "left":
self.heading = "up"
elif rotation == -90: # turn left
if self.heading == "up":
self.heading = "left"
elif self.heading == "left":
self.heading = "down"
elif self.heading == "down":
self.heading = "right"
elif self.heading == "right":
self.heading = "up"
elif rotation == 180:
if self.heading == "up":
self.heading = "down"
elif self.heading == "right":
self.heading = "left"
elif self.heading == "down":
self.heading = "up"
elif self.heading == "left":
self.heading = "right"
# update position
if movement > 0:
if self.heading == "up":
self.location = [x, y+movement]
elif self.heading == "right":
self.location = [x+movement, y]
elif self.heading == "left":
self.location = [x-movement, y]
elif self.heading == "down":
self.location = [x, y-movement]
def get_next_step_coordinates(self, robot_rel_heading, movement):
"""Return coordinates of the next potential move in a given direction
Args:
robot_rel_heading: string indicating relative change of the robot heading
taking one of three values: "up", "right", "left"
movement: integer indicating the robot’s movement on that timestep
movement follows the rotation in the range [-3, 3] inclusive
Returns:
robot_location: integer [location_x, location_y]
indicating coordinates for the potential move in a given direction
Examples:
>>> robot_rel_heading = "up"
>>> movement = 1
>>> self.get_next_step_coordinates(robot_rel_heading, movement)
"""
robot_heading = robot_rel_heading
robot_location = [0, 0]
x = self.location[0]
y = self.location[1]
# Transform relative heading to the real heading relative to the maze
if robot_rel_heading == "up": #robot intends to continue in the direction
robot_heading = self.heading
elif robot_rel_heading == "right": #robot intends to turn right
if self.heading == "up":
robot_heading = "right"
elif self.heading == "right":
robot_heading = "down"
elif self.heading == "down":
robot_heading = "left"
else: # robot is heading left relative to the maze
robot_heading = "up"
elif robot_rel_heading == "left": #robot intends to turn left
if self.heading == "up":
robot_heading = "left"
elif self.heading == "right":
robot_heading = "up"
elif self.heading == "down":
robot_heading = "right"
else: # robot is heading left relative to the maze
robot_heading = "down"
if robot_heading == "up":
robot_location = [x, y+movement]
elif robot_heading == "right":
robot_location = [x+movement, y]
elif robot_heading == "left":
robot_location = [x-movement, y]
else: # robot is heading down
robot_location = [x, y-movement]
return robot_location
``` |
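The Robot class above is normally driven by an external test harness; the following rough sketch only illustrates the next_move contract with fabricated sensor readings, and is not the project's real tester.
```python
# Hypothetical driver sketch for Robot.next_move(); sensor readings are made up.
# Assumes this file is importable as robot.py.
from robot import Robot

robot = Robot(maze_dim=12)
fake_sensor_stream = [[0, 11, 0], [0, 10, 2], [2, 9, 0]]  # assumed readings

for sensors in fake_sensor_stream:
    rotation, movement = robot.next_move(sensors)
    if (rotation, movement) == ('Reset', 'Reset'):
        break  # robot signalled the end of its exploration run
    print('rotation:', rotation, '| movement:', movement, '| now at', robot.location)
```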
{
"source": "JMarcan/robot_motion_planning_maze",
"score": 4
} |
#### File: algorithms_exercise/Algorithm 2 - Search in a Rotated Sorted Array/Search in a Rotated Sorted Array.py
```python
def rotated_array_search(input_arr, number):
"""
Find the index by searching in a rotated sorted array
Time complexity: O(log2(n)) (as long as array is rotated only once)
Space Complexity: O(1)
Where n is the array size
Args:
input_array(array), number(int): Input array to search and the target
Returns:
int: Index or -1
"""
pivot_idx = find_pivot(input_arr, 0, len(input_arr)-1)
# if we have not found an pivot, the array is not rotated
if pivot_idx == -1:
return binary_search(input_arr, number, 0, len(input_arr)-1)
# if we found a pivot, first check whether the pivot element itself is the target
if input_arr[pivot_idx] == number:
return pivot_idx
if input_arr[0] <= number:
return binary_search(input_arr, number, 0, pivot_idx-1);
else:
return binary_search(input_arr, number, pivot_idx+1, len(input_arr)-1,);
def find_pivot(input_arr, min_idx, max_idx):
"""
Find the pivot index of a rotated array
Time complexity: O(log2(n))
Space Complexity: O(1)
Args:
input_array(array): rotated array
Returns:
pivot_idx(int)
"""
mid = (min_idx + max_idx) // 2
# if the mid element is higher than the next one, we found a pivot
if mid < max_idx and input_arr[mid] > input_arr[mid + 1]:
return mid
# if the mid-1 element is higher than the mid element, we found a pivot
if mid > min_idx and input_arr[mid] < input_arr[mid - 1]:
return (mid-1)
# if the first element is higher than the current (mid) element,
# call recrusion for the lower interval
if input_arr[min_idx] >= input_arr[mid]:
return find_pivot(input_arr, min_idx, mid-1)
# else if the first element is lower than the current (mid) element,
# recurse on the higher interval
else:
return find_pivot(input_arr, mid + 1, max_idx)
def binary_search(input_list, number, min_idx, max_idx):
"""
Find the index for a given value (number) by searching in a sorted array
Time complexity: O(log2(n))
Space Complexity: O(1)
Args:
- input_list(array): sorted array of numbers to be searched in
- number(int): number to be searched for
Returns:
- position(int): reuturns array index for the given number
returns -1 when the number was not found
"""
# corner case for case when provided min_idx is higher than provided max_idx
if max_idx < min_idx:
return -1
# binary search
while min_idx <= max_idx:
mid = (min_idx + max_idx) // 2
# Check if x is present at mid
if input_list[mid] == number:
return mid
# If the guess was too low, set min to be one larger than the guess
if input_list[mid] < number:
min_idx = mid + 1
# If the guess was too high, set max to be one smaller than the guess
else:
max_idx = mid - 1
# if we got here, the number was not found
return -1
def linear_search(input_list, number):
"""
Find the index for a given value (number) by searching in a sorted array
Time complexity: O(n)
Space Complexity: O(1)
Args:
- input_list(array): sorted array of numbers to be searched in
- number(int): number to be searched for
Returns:
- position(int): returns array index for the given number
returns -1 when the number was not found
"""
for index, element in enumerate(input_list):
if element == number:
return index
return -1
def test_rotated_array_search(test_case):
input_list = test_case[0]
number = test_case[1]
if linear_search(input_list, number) == rotated_array_search(input_list, number):
print("Pass")
else:
print("Fail")
def test_binary_search(test_case):
input_arr = test_case[0]
number = test_case[1]
if linear_search(input_arr, number) == binary_search(input_arr, number, 0, len(input_arr)):
print("Pass")
else:
print("Fail")
print ("=== Binary search test cases execution ===:")
test_binary_search([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 5])
test_binary_search([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4])
test_binary_search([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3])
test_binary_search([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2])
test_binary_search([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1])
print ("=== rotated_array_search, test cases ===:")
test_rotated_array_search([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_rotated_array_search([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_rotated_array_search([[6, 7, 8, 1, 2, 3, 4], 8])
test_rotated_array_search([[6, 7, 8, 1, 2, 3, 4], 1])
test_rotated_array_search([[6, 7, 8, 1, 2, 3, 4], 10])
print ("=== rotated_array_search, edge test cases ===:")
test_rotated_array_search([[6, 7, 8, 1, 2, 3, 4], 25]) # searching for a value not present in the array
test_rotated_array_search([[6, 7, 8, 1, 2, 3, 4], -1]) # searching for a negative value not present in the array
```
#### File: algorithms_exercise/Algorithm 4 - Dutch National Flag/Dutch National Flag.py
```python
def sort_012(input_arr):
"""
Given an input array consisting of only 0s, 1s, and 2s, sort the array in a single traversal.
Time Complexity O(n)
Space Complexity O(n)
Where n is the array size.
Args:
input_arr(array): Array to be sorted
Returns:
sorted_arr(array): Sorted array
"""
# Test that input_arr consists only of digits between 0 and 2
for element in input_arr:
if element < 0 or element > 2:
return (-1, -1)
bin_zeros = []
bin_ones = []
bin_twos = []
for element in input_arr:
if element == 0:
bin_zeros.append(element)
elif element == 1:
bin_ones.append(element)
elif element == 2:
bin_twos.append(element)
sorted_arr = bin_zeros + bin_ones + bin_twos
return sorted_arr
def test_function(test_case):
sorted_array = sort_012(test_case)
print(sorted_array)
if sorted_array == sorted(test_case):
print("Pass")
else:
print("Fail")
def test_function_edge(test_case):
sorted_array = sort_012(test_case)
print(sorted_array)
if sorted_array == (-1, -1):
print("Pass")
else:
print("Fail")
print ("=== Test cases ===:")
test_function([0, 0, 2, 2, 2, 1, 1, 1, 2, 0, 2])
test_function([2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0, 2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1])
test_function([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2])
print ("=== Edge test cases ===:")
# test that -1 will be returned for provided invalid input with not allowed value 3
test_function_edge([1, 2, 3])
# test that -1 will be returned for provided invalid input with not allowed negative values
test_function_edge([0, -1, -2])
```
#### File: exercises_algorithms_data_structures_big_o/algorithms_training/Python Generators - Calculating Factorial.py
```python
def prod(a,b):
output = a*b
return output
def fact_gen():
i = 1
n = i
while True:
output = prod(n, i)
i = i + 1
n = output
yield output
# Test block
my_gen = fact_gen()
num = 5 # number for which factorial should be calculated
for i in range(num):
print(next(my_gen))
# Correct result when num = 5:
# 1
# 2
# 6
# 24
# 120
```
#### File: exercises_algorithms_data_structures_big_o/data_structures_exercises/Problem 4 - Active Directory.py
```python
class Group(object):
def __init__(self, _name):
self.name = _name
self.groups = set()
self.users = set()
def add_group(self, group):
self.groups.add(group)
def add_user(self, user):
self.users.add(user)
def get_groups(self):
return self.groups
def get_users(self):
return self.users
def get_name(self):
return self.name
def is_user_in_group(user, group):
"""
Check whether the user belongs to the group
Args:
user(str): user name/id
group(class:Group): group to check user membership against
Returns:
- True if user is in the group, False otherwise
"""
if isinstance(group, Group) == False:
print("[Warrning] is_user_in_group: provided group (\'{0}\') does not exists. Returning False".format(group))
return False
# check whether user is a direct member of the group
if user in group.users:
return True
# check whether user is member of any sub group of the group
for g in group.groups:
if is_user_in_group(user, g):
return True
return False
def test_cases():
"""
Execute test cases for active directory implementation
Statements indicating whether each test case passed or failed are printed
Args:
None
Returns:
None
"""
parent = Group("parent")
child = Group("child")
sub_child = Group("subchild")
sub_child_user = "sub_child_user"
sub_child.add_user(sub_child_user)
child.add_group(sub_child)
parent.add_group(child)
# TC 1 - user is direct member of this group
if is_user_in_group(sub_child_user, sub_child) == True:
print ("TC 1. Passed")
else:
print ("TC 1. Failed")
# TC 2 - user is a member of a nested sub-group of the parent group
if is_user_in_group(sub_child_user, parent) == True:
print ("TC 2. Passed")
else:
print ("TC 2. Failed")
# TC 3 - non-existing user, existing group
if is_user_in_group("not existing user", parent) == False:
print ("TC 3. Passed")
else:
print ("TC 3. Failed")
# TC 4 - non-existing group
if is_user_in_group("not existing user", "not existing group") == False:
print ("TC 4. Passed")
else:
print ("TC 4. Failed")
if __name__ == '__main__':
# execute test cases
test_cases()
```
#### File: top_k_elements_in_data/top_k_elements/top_k_elements.py
```python
import unittest
import heapq
import sys
import logging
import os.path
from typing import List
logger = logging.getLogger(__name__)
class TopElements:
'''TopElements allows you
to get record_identifiers of k-largest values
by processing file (method 'get_top_k_from_file') or
by processing stdin (method 'process_stdin')
'''
@staticmethod
def get_top_k_from_stdin(k: int)-> List[int]:
'''Returns top k record_identifiers associated with the highest value
Args:
k (int) : number of top k record_identifiers to be returned
Returns:
top_k : top k record_identifiers with the highest value
'''
# return empty list for k-values that are 0 or negative
if k < 1:
return []
# variables
top_k_candidates = [] # binary heap (min-heap)
line_idx = 1
# process stdin
for line in sys.stdin:
if 'Exit' == line.rstrip():
break
res = TopElements.__process_line(top_k_candidates, k, line)
if res == 0:
logging.warning(
f'Invalid data format. Skipping this one: \'{line}\'')
line_idx += 1
# return top-x elements
return TopElements.__return_top_k(top_k_candidates)
@staticmethod
def get_top_k_from_file(k: int, input_file : str = None) -> List[int]:
'''Returns top k record_identifiers associated with the highest value
Args:
k (int): number of top k record_identifiers to be returned
input_file (str): path to file containing data to be analyzed
Returns:
top_k: top k record_identifiers with the highest value
'''
# return empty list for k-values that are 0 or negative
if k < 1:
return []
# variables
top_k_candidates = [] # binary heap (min-heap)
invalid_lines = 0
line_idx = 1
# Process input file
try:
f = open(input_file, 'r')
except IOError:
logging.error(f'Cannot open file \'{input_file}\'')
return []
# Reading line by line (O(n)) allows us to read extremely large files exceeding available RAM
for line in f:
res = TopElements.__process_line(top_k_candidates, k, line)
if res == 0:
invalid_lines += 1
logging.warning(
f'{invalid_lines}. Invalid data format. input_file: \'{input_file}\' Skipping line: \'{line_idx}\'. Line data: \'{line}\'')
line_idx += 1
f.close()
# Return top-x elements
return TopElements.__return_top_k(top_k_candidates)
@staticmethod
def __process_line(top_k_candidates: list, k: int, line: str) -> int:
'''Process data in a given line
and keep it between top_k_candidates
if it has a higher value than previous candidates or if k limit was not reached yet.
        Only the highest k candidates are kept to optimize memory and handle even extremely large files.
Args:
top_k_candidates (list): number of top k elements that will be returned
k (int): number of top k elements that will be returned
line (str): data of a given line to be processed
Returns:
            (int): 1 if the line was processed, 0 if the data was in the wrong format
        Algorithm time complexity: O(log(k))
Space complexity: O(k)
'''
# parse the line
# & catch and skip incorrect input line processing
item = line.split()
if len(item) != 2:
return 0
        try:
            index = int(item[0])
        except ValueError:
            return 0
        try:
            value = int(item[1])
        except ValueError:
            return 0
# Store first top k elements into the heap
if len(top_k_candidates) < k:
heapq.heappush(top_k_candidates, (value, index))
# Keep only top k elements in the heap
else:
min_top_value = top_k_candidates[0][0] #It's min-heap
if value > min_top_value:
# Time complexity: O(log(k))
# Space complexity: O(k)
heapq.heappushpop(top_k_candidates, (value, index))
return 1
@staticmethod
def __return_top_k(top_k_candidates: list) -> List[int]:
top_k = []
for item in top_k_candidates:
index = item[1]
top_k.append(index)
return top_k
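# --- Usage sketch (not part of the original module; the file name below is hypothetical) ---
# The input is expected to contain one "<record_identifier> <value>" pair per line.
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARNING)
    top_ids = TopElements.get_top_k_from_file(3, 'sample_data.txt')
    print(top_ids)  # record identifiers of the three largest values (not sorted by value)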
``` |
{
"source": "jmarcantony/StormBreaker",
"score": 2
} |
#### File: jmarcantony/StormBreaker/payload.py
```python
import os
class Payload:
def __init__(self, lhost, lport, filename):
self.lhost = lhost
self.lport = lport
self.filename = filename
def create_payload(self, compile=False):
with open("payload_templates/python_backdoor_base.py", "r") as base:
base_content = base.read()
final_payload = base_content.replace("[IP PLACEHOLDER]", self.lhost)
        if str(self.lport) != "4444":
            final_payload = final_payload.replace("4444", str(self.lport))
os.mkdir(f"payloads/{self.filename}")
with open(f"payloads/{self.filename}/{self.filename}.py", "w") as f:
f.write(final_payload)
if compile:
try:
os.system(f"pyinstaller --noconsole --onefile --clean --log-level CRITICAL -F --distpath payloads/{self.filename} --workpath payloads/{self.filename}/build --specpath payloads/{self.filename}/build payloads/{self.filename}/{self.filename}.py")
except:
print("[-] Requirements not satisfied!\n run command 'pip install -r requirements.txt' to install requirements")
        print(f"\n [+] Payload Created at {os.path.join(os.getcwd(), 'payloads', self.filename)} [+]\n")
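# --- Usage sketch (illustrative only; host, port and name below are hypothetical) ---
#   Payload("192.168.1.10", "4444", "update").create_payload(compile=False)
#   # writes payloads/update/update.py with the LHOST (and, if changed, LPORT) patched in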
``` |
{
"source": "jmarca/sports_scheduling",
"score": 2
} |
#### File: sports_scheduling/test/test_edge_cases.py
```python
import re
import os
import subprocess
import io
import sys
from contextlib import contextmanager
import filecmp
@contextmanager
def redirected(out=sys.stdout, err=sys.stderr):
saved = sys.stdout, sys.stderr
sys.stdout, sys.stderr = out, err
try:
yield
finally:
sys.stdout, sys.stderr = saved
def test_edge_cases():
output_file = 'jabba'
try:
# clean up the temp file
os.unlink(output_file+'.csv')
os.unlink(output_file+'_1.csv')
os.unlink(output_file+'_2.csv')
except:
pass
# test adding csv to ending of output file
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','14'
,'-p','2'
,'--cpu','6'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
print('out is ',out)
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 6',err,re.MULTILINE)
assert not os.path.isfile(output_file)
assert os.path.isfile(output_file+'.csv')
assert not os.path.isfile(output_file+'_1.csv')
assert not os.path.isfile(output_file+'_2.csv')
except:
assert False
# test adding _1.csv to ending of output file if it already exists
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','14'
,'-p','2'
,'--cpu','6'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
print('out is ',out)
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 6',err,re.MULTILINE)
assert not os.path.isfile(output_file)
assert os.path.isfile(output_file+'.csv')
assert os.path.isfile(output_file+'_1.csv')
assert not os.path.isfile(output_file+'_2.csv')
except:
assert False
# test adding _2.csv to ending of output file if it already exists
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','14'
,'-p','2'
,'--cpu','6'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
print('out is ',out)
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 6',err,re.MULTILINE)
assert not os.path.isfile(output_file)
assert os.path.isfile(output_file+'.csv')
assert os.path.isfile(output_file+'_1.csv')
assert os.path.isfile(output_file+'_2.csv')
except:
assert False
try:
# clean up the temp file
os.unlink(output_file+'.csv')
os.unlink(output_file+'_1.csv')
os.unlink(output_file+'_2.csv')
except:
pass
```
#### File: sports_scheduling/test/test_round_robin_cases.py
```python
import re
import os
import subprocess
import io
import sys
from contextlib import contextmanager
import filecmp
def test_rr_cases():
# now for various combinations of inputs
output_file = 'test_output.csv'
# test exact round robin case, but just once
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','8'
,'-d','7'
,'-p','2'
,'--cpu','2'
,'--debug'
,'--timelimit','10'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
print('out 186 is ',out)
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 2',err,re.MULTILINE)
except:
assert False
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
# test exact round robin case, but just twice around
process_command_line = ['python','src/sports_schedule_sat.py'
,'-t','4'
,'-d','6'
,'-p','1'
,'--cpu','2'
,'--debug'
,'--timelimit','60'
,'--csv',output_file]
try:
proc = subprocess.run(process_command_line, encoding='utf8', capture_output=True)
out = proc.stdout
err = proc.stderr
assert re.search('OPTIMAL', out, re.MULTILINE)
assert re.search('num_search_workers: 2',err,re.MULTILINE)
except:
assert False
try:
# clean up the temp file
os.unlink(output_file)
except:
print('no file to delete')
``` |
{
"source": "jmarcet/comskip-recordings",
"score": 2
} |
#### File: jmarcet/comskip-recordings/comskip-recordings.py
```python
import asyncio
import os
from asyncio.subprocess import DEVNULL, PIPE
from glob import glob
COMSKIP_INI = '/etc/comskip.ini'
LOGFILE = '/storage/recordings/comskip.log'
RECORDINGS = '/storage/recordings'
COMSKIP = '/usr/bin/comskip'
INOTIFYWAIT = '/usr/bin/inotifywait'
IONICE = '/bin/ionice'
MKVMERGE = '/usr/bin/mkvmerge'
NICE = '/bin/nice'
def log(text):
print(text)
with open(LOGFILE, 'a') as f:
f.write(text + '\n')
def cleanup(filename, _check=True):
[os.remove(x) for x in glob(filename + '.*') if not x.endswith('.txt') and not x.endswith('.mpeg')]
if _check and (not os.path.exists(filename + '.txt') or not os.path.getsize(filename + '.txt')):
if os.path.exists(filename + '.mpeg'):
log('[WARNING]: something went wrong analyzing %s.mpeg, marking as already processed' % filename)
with open(filename + '.txt', 'w') as f:
f.write('[WARNING]: something went wrong analyzing this video\n')
else:
log('[WARNING]: something went wrong analyzing %s.mpeg' % filename)
async def run(*args, _filename=None):
if not _filename:
return await asyncio.create_subprocess_exec(*args, stdout=PIPE)
p = await asyncio.create_subprocess_exec(*args, stdout=DEVNULL, stderr=DEVNULL)
await p.wait()
if p.returncode != 0:
cleanup(_filename)
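# Processing pipeline implemented by main() below, driven by inotify close_write events:
#   (1/3) a finished '<name>.mpeg' recording is analysed with comskip, unless a
#         '<name>.txt' marker shows it was already processed;
#   (2/3) a new '<name>.mkvtoolnix.chapters' file triggers mkvmerge, which embeds the
#         commercial cut-points into '<name>.mpeg-merged' (a 132-byte chapters file
#         means no commercials were found and everything is cleaned up instead);
#   (3/3) the merged file then replaces the original '<name>.mpeg'.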
async def main():
proc = await run(INOTIFYWAIT, '-m', '-r', '-e', 'close_write', '--format', '%w%f', RECORDINGS)
while True:
recording = (await proc.stdout.readline()).decode(encoding='latin1', errors='replace').rstrip()
if recording.endswith('.mpeg') or recording.endswith('.mpeg-merged'):
filename = os.path.splitext(recording)[0]
elif recording.endswith('.mkvtoolnix.chapters'):
filename = recording.rpartition('.mkvtoolnix.chapters')[0]
else:
if recording.endswith('.log.txt'):
log(recording)
continue
if not os.path.exists(recording) or not os.path.isfile(recording):
log('[ERROR] unable to find %s' % recording)
continue
if recording.endswith('.mpeg'):
if os.path.exists(filename + '.txt'):
#log('(0/0) %s already processed' % recording)
continue
log('(1/3) Recording FILENAME="%s" ended' % recording)
log(' comskip --ini=%s "%s"' % (COMSKIP_INI, recording))
asyncio.create_task(run(NICE, '-n', '10', IONICE, '-c', '3', COMSKIP, '--ini=%s' % COMSKIP_INI, recording, _filename=filename))
elif recording.endswith('.mkvtoolnix.chapters'):
chapters = recording
merged = filename + '.mpeg-merged'
recording = filename + '.mpeg'
log('(2/3) Chapters FILENAME="%s" generated' % chapters)
if os.path.getsize(chapters) == 132:
log(' No commercials found, skipping...')
cleanup(filename, _check=False)
continue
log(' mkvmerge -o "%s" --chapters "%s" "%s"' % (merged, chapters, recording))
asyncio.create_task(run(MKVMERGE, '-o', merged, '--chapters', chapters, recording, _filename=filename))
elif recording.endswith('.mpeg-merged'):
merged = recording
recording = filename + '.mpeg'
            log('(3/3) Commercial cutpoints FILENAME="%s" merged successfully' % merged)
log(' mv "%s" "%s"' % (merged, recording))
try:
os.rename(merged, recording)
except:
log(' -> FAILED: could not move "%s" to "%s"' % (merged, recording))
cleanup(filename)
try:
asyncio.run(main())
except KeyboardInterrupt:
log('Good bye!')
``` |
{
"source": "jmarcet/dockers",
"score": 2
} |
#### File: config/.pyroscope/config.py
```python
def _custom_fields():
""" Yield custom field definitions.
"""
# Import some commonly needed modules
import os
from pyrocore.torrent import engine, matching
from pyrocore.util import fmt
# PUT CUSTOM FIELD CODE HERE
# Add rTorrent attributes not available by default
def get_tracker_field(obj, name, aggregator=sum):
"Get an aggregated tracker field."
return aggregator(obj._engine._rpc.t.multicall(obj._fields["hash"], 0, "t.%s=" % name)[0])
yield engine.OnDemandField(int, "is_partially_done", "is partially done", matcher=matching.FloatFilter)
yield engine.OnDemandField(int, "selected_size_bytes", "size of selected data", matcher=matching.FloatFilter)
yield engine.OnDemandField(int, "peers_connected", "number of connected peers", matcher=matching.FloatFilter)
yield engine.DynamicField(int, "downloaders", "number of completed downloads", matcher=matching.FloatFilter,
accessor=lambda o: get_tracker_field(o, "scrape_downloaded"))
yield engine.DynamicField(int, "seeds", "number of seeds", matcher=matching.FloatFilter,
accessor=lambda o: get_tracker_field(o, "scrape_complete"))
yield engine.DynamicField(int, "leeches", "number of leeches", matcher=matching.FloatFilter,
accessor=lambda o: get_tracker_field(o, "scrape_incomplete"))
yield engine.DynamicField(engine.untyped, "lastscraped", "time of last scrape", matcher=matching.TimeFilter,
accessor=lambda o: get_tracker_field(o, "scrape_time_last", max),
formatter=lambda dt: fmt.human_duration(float(dt), precision=2, short=True))
# Add peer attributes not available by default
def get_peer_data(obj, name, aggregator=None):
"Get some peer data via a multicall."
aggregator = aggregator or (lambda _: _)
result = obj._engine._rpc.p.multicall(obj._fields["hash"], 0, "p.%s=" % name)
return aggregator([i[0] for i in result])
yield engine.DynamicField(set, "peers_ip", "list of IP addresses for connected peers",
matcher=matching.TaggedAsFilter, formatter=", ".join,
accessor=lambda o: set(get_peer_data(o, "address")))
# Add file checkers
def has_nfo(obj):
"Check for .NFO file."
pathname = obj.path
if pathname and os.path.isdir(pathname):
return any(i.lower().endswith(".nfo") for i in os.listdir(pathname))
else:
return False if pathname else None
def has_thumb(obj):
"Check for folder.jpg file."
pathname = obj.path
if pathname and os.path.isdir(pathname):
return any(i.lower() == "folder.jpg" for i in os.listdir(pathname))
else:
return False if pathname else None
yield engine.DynamicField(engine.untyped, "has_nfo", "does download have a .NFO file?",
matcher=matching.BoolFilter, accessor=has_nfo,
formatter=lambda val: "NFO" if val else "!DTA" if val is None else "----")
yield engine.DynamicField(engine.untyped, "has_thumb", "does download have a folder.jpg file?",
matcher=matching.BoolFilter, accessor=has_thumb,
formatter=lambda val: "THMB" if val else "!DTA" if val is None else "----")
# Fields for partial downloads
def partial_info(obj, name):
"Helper for partial download info"
try:
return obj._fields[name]
except KeyError:
f_attr = ["get_completed_chunks", "get_size_chunks", "get_range_first", "get_range_second"]
chunk_size = obj.fetch("chunk_size")
prev_chunk = -1
size, completed, chunks = 0, 0, 0
for f in obj._get_files(f_attr):
if f.prio: # selected?
shared = int(f.range_first == prev_chunk)
size += f.size
completed += f.completed_chunks - shared
chunks += f.size_chunks - shared
prev_chunk = f.range_second - 1
obj._fields["partial_size"] = size
obj._fields["partial_missing"] = (chunks - completed) * chunk_size
obj._fields["partial_done"] = 100.0 * completed / chunks if chunks else 0.0
return obj._fields[name]
yield engine.DynamicField(int, "partial_size", "bytes selected for download",
matcher=matching.ByteSizeFilter,
accessor=lambda o: partial_info(o, "partial_size"))
yield engine.DynamicField(int, "partial_missing", "bytes missing from selected chunks",
matcher=matching.ByteSizeFilter,
accessor=lambda o: partial_info(o, "partial_missing"))
yield engine.DynamicField(float, "partial_done", "percent complete of selected chunks",
matcher=matching.FloatFilter,
accessor=lambda o: partial_info(o, "partial_done"))
# Map name field to TV series name, if applicable, else an empty string
from pyrocore.util import traits
def tv_mapper(obj, name, templ):
"Helper for TV name mapping"
try:
return obj._fields[name]
except KeyError:
itemname = obj.name
result = ""
kind, info = traits.name_trait(itemname, add_info=True)
if kind == "tv":
try:
info["show"] = ' '.join([i.capitalize() for i in info["show"].replace('.',' ').replace('_',' ').split()])
result = templ % info
                except KeyError as exc:
#print exc
pass
obj._fields[name] = result
return result
yield engine.DynamicField(fmt.to_unicode, "tv_series", "series name of a TV item",
matcher=matching.PatternFilter, accessor= lambda o: tv_mapper(o, "tv_series", "%(show)s"))
yield engine.DynamicField(fmt.to_unicode, "tv_episode", "series name and episode number of a TV item",
matcher=matching.PatternFilter, accessor= lambda o: tv_mapper(o, "tv_episode", "%(show)s.S%(season)sE%(episode)s"))
# Disk space check
def has_room(obj):
"Check disk space."
pathname = obj.path
if pathname and not os.path.exists(pathname):
pathname = os.path.dirname(pathname)
if pathname and os.path.exists(pathname):
stats = os.statvfs(pathname)
return (stats.f_bavail * stats.f_frsize - int(diskspace_threshold_mb) * 1024**2
> obj.size * (1.0 - obj.done / 100.0))
else:
return None
yield engine.DynamicField(engine.untyped, "has_room",
"check whether the download will fit on its target device",
matcher=matching.BoolFilter, accessor=has_room,
formatter=lambda val: "OK" if val else "??" if val is None else "NO")
globals().setdefault("diskspace_threshold_mb", "500")
# Register our factory with the system
custom_field_factories.append(_custom_fields)
``` |
{
"source": "jmarckel/get-er-done",
"score": 2
} |
#### File: GetErDone/server/api.py
```python
import argparse
import copy
import base64
import calendar
import json
import logging
import os
import re
import requests
import stat
import tempfile
import time
import urllib
import traceback
from functools import wraps
import flask
from flask import Flask, redirect, request, jsonify, _request_ctx_stack, g
from flask_cors import CORS, cross_origin
from jose import jwt
from six.moves.urllib.request import urlopen
from . import Storage
# initial setup for flask
app = Flask(__name__)
CORS(app)
app_root = os.path.dirname(__file__)
app_runtime = os.path.join(app_root, '../../../runtime')
site_keyfile = os.path.join(app_runtime, 'api-site.key')
if(not os.path.exists(site_keyfile)):
with open(site_keyfile, "w+b") as keyfile:
keyfile.write(os.urandom(24))
os.chmod(site_keyfile, stat.S_IRUSR)
# read the secret key
#
# hmm, is this a conflict?
#
with open(site_keyfile, "rb") as keyfile:
app.secret_key = keyfile.read()
# load the auth configuration
auth_config = None
auth_config_file = os.path.join(app_runtime, 'get-er-done-config.json')
with open(auth_config_file, 'r') as cfgfile:
auth_config = json.loads(cfgfile.read())
# configure logging
logger = logging.getLogger('server')
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
fileLogger = logging.FileHandler(os.path.join(app_runtime, 'api-server.log'))
fileLogger.setFormatter(fmt)
logger.addHandler(fileLogger)
#
# common auth code
#
# Format error response and append status code.
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
        self.stack_trace = ''.join(traceback.format_stack())
self.status_code = status_code
@app.errorhandler(AuthError)
def handle_auth_error(ex):
logger.error("auth error '%s:%s': %s\n\nStack:\n%s"
% (request.method,
request.url,
json.dumps(ex.error),
ex.stack_trace))
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
@app.errorhandler(Exception)
def handle_generic_error(ex):
logger.error("auth exception '%s:%s': %s\n\nStack:\n%s"
% (request.method,
request.url,
ex.__str__(),
                    traceback.format_exc()))
response = jsonify(ex.__str__())
response.status_code = 500
return response
#
# SPA auth code
#
def get_access_token():
"""Obtains the access token from the Authorization Header
"""
logger.info('get_access_token()')
auth = request.headers.get("Authorization", None)
if not auth:
logger.error('get_access_token() no Authorization header')
logger.error('headers:\n%s' % (request.headers))
raise AuthError({"code": "authorization_header_missing",
"description":
"Authorization header is expected"}, 401)
parts = auth.split()
if parts[0].lower() != "bearer":
logger.error('get_access_token() invalid header')
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must start with"
" Bearer"}, 401)
elif len(parts) == 1:
logger.error('get_access_token() not enough parts')
raise AuthError({"code": "invalid_header",
"description": "Token not found"}, 401)
elif len(parts) > 2:
logger.error('get_access_token() too many parts')
raise AuthError({"code": "invalid_header",
"description":
"Authorization header must be"
" Bearer token"}, 401)
token = parts[1]
return token
def has_required_scope(required_scope):
"""Determines if the required scope is present in the access token
Args:
required_scope (str): The scope required to access the resource
"""
token = get_access_token()
unverified_claims = jwt.get_unverified_claims(token)
if unverified_claims.get("scope"):
token_scopes = unverified_claims["scope"].split()
for token_scope in token_scopes:
if token_scope == required_scope:
return True
logger.info("action not supported by scope")
return False
def api_requires_auth(f):
"""Determines if the access token is valid
"""
@wraps(f)
def decorated(*args, **kwargs):
logger.info('api_requires_auth() %s' % (request.method))
access_token = get_access_token()
logger.info('api_requires_auth() have token: ' + access_token)
try:
unverified_header = jwt.get_unverified_header(access_token)
except jwt.exceptions.DecodeError as e:
logger.error('api_requires_auth() decode error ' + e.__str__())
raise
except jwt.exceptions.InvalidTokenError:
logger.error('api_requires_auth() invalid header')
raise AuthError(
{"code": "invalid_header",
"description": "Invalid header. Use RS256 signed JWT"},
401)
if unverified_header["alg"] == "HS256":
logger.error('api_requires_auth() invalid header alg')
raise AuthError(
{"code": "invalid_header",
"description": "Invalid header. "
"Use an RS256 signed JWT Access Token"},
401)
# fetch the well known keys
url = str("https://%s/.well-known/jwks.json"
% (auth_config['SPA']['domain']))
jsonurl = urlopen(url)
logger.info('api_requires_auth() fetched well known keys from: ' + url)
data = jsonurl.read()
jwks = json.loads(data.decode('utf8'))
logger.info('api_requires_auth() jwks: ' + data.decode('utf8'))
        rsa_key = None
        for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
logger.info('api_requires_auth() key id matched')
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
break
if rsa_key:
try:
logger.info("api_requires_auth() with rsa key for token '%s'"
% (access_token))
payload = jwt.decode(
access_token,
key=rsa_key,
algorithms=["RS256"],
audience=auth_config['SPA']['audience'],
issuer="https://" + auth_config['SPA']['domain'] + "/"
)
logger.info('api_requires_auth() key payload decoded')
except jwt.exceptions.ExpiredSignatureError:
logger.error('api_requires_auth() expired signature')
raise AuthError({"code": "token_expired",
"description": "token is expired"}, 401)
except jwt.exceptions.JWTClaimsError:
logger.error('api_requires_auth() invalid claims')
raise AuthError({"code": "invalid_claims",
"description": "incorrect claims,"
" please check the audience and issuer"},
401)
except Exception as e:
logger.error('api_requires_auth() exception')
raise AuthError({"code": "invalid_header",
"description":
"Unable to parse authentication"
" token. " + e.__str__()}, 401)
logger.info('api_requires_auth() all good!\n%s' % (payload))
# _request_ctx_stack.top.current_user = payload
g.authd_user = copy.deepcopy(payload)
return f(*args, **kwargs)
logger.error('api_requires_auth() no appropriate key')
raise AuthError({"code": "invalid_header",
"description": "Unable to find appropriate key"}, 401)
return decorated
#
# code associated with a list of tasks
#
def task_list_json_delete_handler():
response = None
try:
content = Storage.delete_all(g.authd_user['sub'])
response = jsonify(content)
response.status_code = 200
except(Storage.StorageException) as e:
logger.error("delete all exception '%s' headers:\n%s"
% (e.message, request.headers))
response = app.make_response(e.message)
response.status_code = 500
return(response)
def task_list_json_get_handler():
response = None
try:
if('assigned_by' in request.args):
logger.info('getting tasks assigned by user %s'
% (g.authd_user['sub']))
content = Storage.fetch_assigned(g.authd_user['sub'])
else:
logger.info('getting tasks assigned to user %s'
% (g.authd_user['sub']))
content = Storage.fetch_all(g.authd_user['sub'])
response = jsonify(content)
response.status_code = 200
except(Storage.StorageException) as e:
logger.error("fetch all exception '%s' headers:\n%s"
% (e.message, request.headers))
response = app.make_response(e.message)
response.status_code = 500
return(response)
def task_list_json_post_handler():
response = None
logger.info("json post: " + json.dumps(request.json))
try:
if('assign_to' in request.args):
request.json['assign_to'] = request.args['assign_to']
else:
if('assign_to' not in request.json):
request.json['assign_to'] = g.authd_user['sub']
Storage.store(g.authd_user['sub'], request.json)
response = app.make_response('OK')
response.status_code = 200
except(Storage.StorageException) as e:
logger.error("task_list_json_post_handler exception"
" '%s' headers:\n%s"
% (e.message, request.headers))
response = app.make_response(e.message)
response.status_code = 500
return(response)
def task_list_json_handler():
response = None
if(request.method == 'POST'):
if(has_required_scope('write:tasks') is True):
logger.debug('add a new task')
response = task_list_json_post_handler()
elif(request.method == 'GET'):
if(has_required_scope('read:tasks') is True):
logger.debug('list all tasks')
response = task_list_json_get_handler()
elif(request.method == 'DELETE'):
if(has_required_scope('delete:tasks') is True):
logger.debug('delete all tasks')
response = task_list_json_delete_handler()
if( response is None):
response = app.make_response('forbidden')
response.status_code = 500
return(response)
@app.route('/tasks', methods=['POST', 'GET', 'DELETE'])
@cross_origin(headers=["Content-Type", "Authorization"])
@cross_origin(headers=["Access-Control-Allow-Origin", "*"])
@api_requires_auth
def task_list_handler():
logger.info("tasks called method = '%s'" % (request.method))
response = None
if(g.authd_user is not None):
logger.info('headers: %s' % (request.headers))
response = task_list_json_handler()
else:
logger.error('authd_user false: %s' % (request.headers))
response = app.make_response('forbidden')
response.status_code = 500
return(response)
@app.route('/assigned', methods=['POST', 'GET', 'DELETE'])
@cross_origin(headers=["Content-Type", "Authorization"])
@cross_origin(headers=["Access-Control-Allow-Origin", "*"])
@api_requires_auth
def assigned_task_list_handler():
logger.info("assigned tasks called method = '%s'" % (request.method))
response = None
if(g.authd_user is not None):
if(has_required_scope('assign:tasks') is True):
response = task_list_json_handler()
else:
logger.error('has_required_scope false: %s' % (request.headers))
response = app.make_response('forbidden')
response.status_code = 500
else:
logger.error('authd_user false: %s' % (request.headers))
response = app.make_response('forbidden')
response.status_code = 500
return(response)
#
# code associated with individual tasks
#
def task_json_put_handler():
response = None
try:
Storage.store(g.authd_user['sub'], request.json)
response = app.make_response('OK')
response.status_code = 200
except(Storage.StorageException) as e:
logger.error("task put exception '%s' headers: %s"
% (e.message, request.headers))
response = app.make_response(e.message)
response.status_code = 500
return(response)
# UNUSED ...
@app.route('/tasks/<uuid:task_id>', methods=['PUT', 'GET', 'DELETE'])
@cross_origin(headers=["Content-Type", "Authorization"])
@api_requires_auth
def task_handler(task_id):
response = None
if(request.headers['Content-Type'] == 'application/json'):
response = task_json_handler()
else:
response = app.make_response('forbidden')
response.status_code = 500
return(response)
def main():
parser = argparse.ArgumentParser(description='get er done server')
parser.add_argument('-v', '--verbose',
action='store_true', help='show verbose logging')
args = parser.parse_args()
if(args.verbose):
logger.setLevel(logging.DEBUG)
consoleLogger = logging.StreamHandler()
consoleLogger.setFormatter(fmt)
logger.addHandler(consoleLogger)
app.run()
if __name__ == "__main__":
main()
```
#### File: GetErDone/server/spa.py
```python
import argparse
import calendar
import json
import logging
import os
import re
import requests
import stat
import tempfile
import time
import urllib
from functools import wraps
import flask
from flask import Flask, redirect, request, jsonify, render_template, url_for
# initial setup for flask
app = Flask(__name__)
app_root = os.path.dirname(__file__)
app_runtime = os.path.join(app_root, '../../../runtime')
site_keyfile = os.path.join(app_runtime, 'spa-site.key')
if(not os.path.exists(site_keyfile)):
with open(site_keyfile, "w+b") as keyfile:
keyfile.write(os.urandom(24))
os.chmod(site_keyfile, stat.S_IRUSR)
# read the secret key
with open(site_keyfile, "rb") as keyfile:
app.secret_key = keyfile.read()
# load the auth configuration
auth_config = None
auth_config_file = os.path.join(app_runtime, 'get-er-done-config.json')
with open(auth_config_file, 'r') as cfgfile:
auth_config = json.loads(cfgfile.read())
# configure logging
logger = logging.getLogger('server')
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
fileLogger = logging.FileHandler(os.path.join(app_runtime, 'spa-server.log'))
fileLogger.setFormatter(fmt)
logger.addHandler(fileLogger)
#
# common auth code
#
# Format error response and append status code.
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
@app.errorhandler(AuthError)
def handle_auth_error(ex):
logger.error("auth error accessing '%s:%s': %s"
% (request.method,
request.url, json.dumps(ex.error)))
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
@app.errorhandler(Exception)
def handle_generic_error(ex):
logger.error("auth exception accessing '%s:%s': %s"
% (request.method, request.url, ex.__str__()))
response = jsonify(ex.__str__())
response.status_code = 500
return response
@app.route('/get-er-done')
def get_er_done():
logger.info('get_er_done called')
c = render_template('get-er-done.html', site_state='')
return c
@app.route('/get-er-done/script')
def get_er_done_script():
logger.info('serving get-er-done.js template')
# only pass the SPA config to avoid risk of exposing WEBAPP secrets
c = render_template('get-er-done.js', auth_config=auth_config['SPA'])
return c
@app.route('/')
def index():
return redirect(url_for('get_er_done'))
def main():
parser = argparse.ArgumentParser(description='get er done SPA server')
parser.add_argument('-v', '--verbose',
action='store_true', help='show verbose logging')
args = parser.parse_args()
if(args.verbose):
logger.setLevel(logging.DEBUG)
consoleLogger = logging.StreamHandler()
consoleLogger.setFormatter(fmt)
logger.addHandler(consoleLogger)
app.run()
if __name__ == "__main__":
main()
```
#### File: GetErDone/server/Storage.py
```python
import logging
import os
import sqlite3
import time
logger = logging.getLogger('server')
app_root = os.path.dirname(__file__)
app_dbdir = os.path.join(app_root, '../../../runtime')
databaseName = os.path.join(app_dbdir, "get_er_done.sql3")
logger.info('db is %s' % (databaseName))
class StorageException(Exception):
    def __init__(self, message):
self.message = message
def delete(user_id, data):
setup()
conn = sqlite3.connect(databaseName)
c = conn.cursor()
c.execute("DELETE FROM tasks where user_id = ? and task_id = ?",
              (user_id, data['order']))
conn.commit()
conn.close()
def delete_all(user_id):
setup()
conn = sqlite3.connect(databaseName)
c = conn.cursor()
    c.execute("DELETE FROM tasks where user_id = ?", (user_id,))
conn.commit()
conn.close()
def fetch(user_id, data):
setup()
reply = {}
conn = sqlite3.connect(databaseName)
c = conn.cursor()
for row in c.execute("SELECT task_id, title, priority, done"
" FROM tasks where user_id = ?"
" AND task_id = ?"
" AND status = 'ACTIVE'",
                         (user_id, data['order'],)):
reply['order'] = row[0]
reply['title'] = row[1]
reply['priority'] = row[2]
if(row[3] == 1):
reply['done'] = True
else:
reply['done'] = False
conn.close()
return reply
def fetch_assigned(user_id):
setup()
reply = []
conn = sqlite3.connect(databaseName)
c = conn.cursor()
for row in c.execute('SELECT task_id,'
' title, priority, done, user_id'
' FROM tasks where assigned_by = ?'
' AND status = "ACTIVE" order by task_id',
(user_id,)):
task = {}
task['order'] = row[0]
task['title'] = row[1]
task['priority'] = row[2]
if(row[3] == 1):
task['done'] = True
else:
task['done'] = False
task['user_id'] = row[4]
reply.append(task)
conn.close()
return reply
def fetch_all(user_id):
setup()
reply = []
conn = sqlite3.connect(databaseName)
c = conn.cursor()
for row in c.execute('SELECT task_id, title,'
' priority, done'
' FROM tasks WHERE user_id = ?'
' AND status = "ACTIVE"'
' ORDER BY task_id',
(user_id,)):
task = {}
task['order'] = row[0]
task['title'] = row[1]
task['priority'] = row[2]
if(row[3] == 1):
task['done'] = True
else:
task['done'] = False
reply.append(task)
conn.close()
return reply
def fetch_users(user_id):
setup()
reply = []
conn = sqlite3.connect(databaseName)
c = conn.cursor()
for row in c.execute('SELECT distinct user_id'
' FROM tasks WHERE user_id != ?'
' ORDER BY user_id',
(user_id,)):
user = {}
user['name'] = row[0]
reply.append(user)
conn.close()
return reply
def setup():
if(os.path.exists(databaseName)):
logger.debug('detected existing database')
else:
logger.debug('creating new database')
conn = sqlite3.connect(databaseName)
c = conn.cursor()
c.execute('''CREATE TABLE tasks ( user_id text,
task_id text,
title text,
priority text,
done bool,
assigned_by text,
status text,
status_dt text,
create_dt text)''')
conn.commit()
conn.close()
def store(user_id, data):
setup()
stor_dt = time.strftime("%Y%m%d %H:%M:%S", time.gmtime())
done_status = 0
if(data['done'] is True):
done_status = 1
conn = sqlite3.connect(databaseName)
c = conn.cursor()
# roll any previous entry off into history
c.execute("UPDATE tasks SET status = ?, status_dt = ? "
"WHERE task_id = ? and user_id = ?",
('HISTORY', stor_dt, data['order'], user_id,))
# insert the new record
c.execute("INSERT INTO tasks (user_id, task_id, title,"
" priority, done, status, status_dt, create_dt,"
" assigned_by) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(data['assign_to'], data['order'], data['title'],
data['priority'], done_status, 'ACTIVE',
stor_dt, stor_dt, user_id,))
conn.commit()
logger.info("stored user %s order %s title '%s'"
% (user_id, data['order'], data['title']))
conn.close()
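# --- Usage sketch (illustrative only; the user id and task fields below are made up) ---
# setup() is called implicitly by every public function, so a minimal round trip is:
#   store("auth0|user123", {"order": "1", "title": "write docs", "priority": "high",
#                           "done": False, "assign_to": "auth0|user123"})
#   print(fetch_all("auth0|user123"))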
``` |
{
"source": "jmarcorb/spotcot",
"score": 2
} |
#### File: spotcot/spotcot/classes.py
```python
import logging
import socket
import threading
import time
import typing
import spot_sdk
import spotcot
__author__ = '<NAME> W2GMD <<EMAIL>>'
__copyright__ = 'Copyright 2020 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
class SpotCoT(threading.Thread):
"""Spot Cursor-on-Target Threaded Class."""
_logger = logging.getLogger(__name__)
if not _logger.handlers:
_logger.setLevel(spotcot.LOG_LEVEL)
_console_handler = logging.StreamHandler()
_console_handler.setLevel(spotcot.LOG_LEVEL)
_console_handler.setFormatter(spotcot.LOG_FORMAT)
_logger.addHandler(_console_handler)
_logger.propagate = False
def __init__(self, api_key: str, password: str, cot_host: str, interval: typing.Any) -> None: # NOQA
self.api_key = api_key
self.password = password
self.cot_host = cot_host
self.interval = interval or spotcot.QUERY_INTERVAL
self.full_addr = spotcot.get_full_addr(cot_host)
# Thread Management:
threading.Thread.__init__(self)
self._stopped = False
def stop(self) -> bool:
"""Stops a SpotCot Thread (at the next opportunity)."""
self._stopped = True
return self._stopped
def run(self) -> None:
"""Starts a SpotCoT Thread."""
spot_feed = spotcot.create_spot_feed(self.api_key, self.password)
self._logger.info("SpotCoT running against CoT Host %s", self.cot_host)
while 1:
try:
spot_feed.collect()
except spot_sdk.SpotSDKError as exc:
self._logger.warning(
"spot_sdk's collect() threw an Exception (ignored): ")
self._logger.exception(exc)
if spot_feed.count() and spot_feed.messages:
self.send_cot(spot_feed)
self._logger.debug('Sleeping for %s seconds...', self.interval)
time.sleep(self.interval)
def send_cot(self, spot_feed):
"""Sends an Spot message in CoT format to a remote host using UDP."""
first_message: object = spotcot.get_first_message(spot_feed)
self._logger.debug('First Spot Message: ')
self._logger.debug(first_message)
cot_event: object = spotcot.spot_to_cot(first_message)
if cot_event is None:
return None
rendered_event: str = cot_event.render(
encoding='UTF-8', standalone=True)
if rendered_event is None:
return None
self._logger.debug(
'Sending %s char CoT Event to %s: ',
len(rendered_event),
self.full_addr
)
self._logger.debug(rendered_event)
MULTICAST_TTL = 5
        # To avoid extra dependencies, a very low-level check of whether the address
        # is multicast (first octet 224-239); this should really be done with re or ipaddress.
        firstbyte = str(self.full_addr)[2:5]
        self._logger.debug('CoT host first octet: %s', firstbyte)
if firstbyte.isnumeric() and int(firstbyte) > 223 and int(firstbyte) < 240:
cot_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
cot_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MULTICAST_TTL)
else:
cot_socket: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
return cot_socket.sendto(rendered_event, self.full_addr)
except Exception as exc:
self._logger.debug(
'Sending CoT Event raised an Exception (ignored): ')
self._logger.exception(exc)
return None
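# --- Usage sketch (illustrative only; key, password, host and interval are placeholders) ---
#   spot_thread = SpotCoT("spot-feed-api-key", "feed-password", "239.2.3.1:8087", 60)
#   spot_thread.start()   # polls the Spot feed and forwards CoT events every 60 seconds
#   spot_thread.stop()    # request the thread to stop at the next opportunity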
``` |
{
"source": "jmarecek/OnlineLDS",
"score": 2
} |
#### File: jmarecek/OnlineLDS/experiments.py
```python
from __future__ import print_function
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import scipy.optimize as opt
import numpy as np
import rlcompleter
from sklearn.metrics import f1_score
import time
import timeit
import math
# debugging
import pdb
pdb.Pdb.complete=rlcompleter.Completer(locals()).complete
import traceback
# Matlab loading
import tables
from scipy.io import loadmat
verbose = False
from onlinelds import *
from inputlds import *
def close_all_figs():
plt.close('all')
def testIdentification(sys, filenameStub = "test", noRuns = 2, T = 100, k = 5, etaZeros = None, ymin = None, ymax = None, sequenceLabel = None, haveSpectral = True):
""" noRuns is the number of runs, T is the time horizon, k is the number of filters, """
if k>T:
print("Number of filters (k) must be less than or equal to the number of time-steps (T).")
exit()
if not etaZeros:
etaZeros = [1.0, 2500.0]
print("etaZeros:")
print(etaZeros)
filename = './outputs/' + filenameStub+'.pdf'
pp = PdfPages(filename)
error_AR_data = None
error_spec_data = None
error_persist_data = None
for i in range(noRuns):
print("run %i" % i)
inputs = np.zeros(T)
sys.solve([[1],[0]],inputs,T)
if haveSpectral:
predicted_spectral, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, k)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
for etaZero in etaZeros:
error_AR = np.zeros(T)
predicted_AR = np.zeros(T)
s=2
D=1.
theta = [0 for i in range(s)]
for t in range(s,T):
eta = pow(float(t),-0.5) / etaZero
Y = sys.outputs[t]
loss = cost_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
error_AR[t] = pow(loss, 0.5)
grad = gradient_AR(theta, Y, list(reversed(sys.outputs[t-s:t])))
#print("Loss: at time step %d :" % (t), loss)
theta = [theta[i] -eta*grad[i] for i in range(len(theta))] #gradient step
norm_theta = np.linalg.norm(theta)
if norm_theta>D: theta = [D*i/norm_theta for i in theta] #projection step
predicted_AR[t] = np.dot(list(reversed(sys.outputs[t-s:t])),theta)
if error_AR_data is None: error_AR_data = error_AR
else: error_AR_data = np.vstack((error_AR_data, error_AR))
p1 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if sum(inputs[1:]) > 0: plt.plot(inputs[1:], label='Input')
if sequenceLabel: plt.plot([float(i) for i in sys.outputs][1:], label=sequenceLabel, color='#000000', linewidth=2, antialiased = True)
else: plt.plot([float(i) for i in sys.outputs][1:], label='Output', color='#000000', linewidth=2, antialiased = True)
#plt.plot([-i for i in predicted_output], label='Predicted output') #for some reason, usual way produces -ve estimate
if haveSpectral:
plt.plot([i for i in predicted_spectral], label='Spectral')
#lab = 'AR(3) / OGD, c_0 = ' + str(etaZero)
lab = "AR(" + str(s) + "), c = " + str(int(etaZero))
plt.plot(predicted_AR, label = lab)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Output')
p1.show()
p1.savefig(pp, format='pdf')
p2 = plt.figure()
plt.ylim(0, 20)
if haveSpectral:
plt.plot(error_spec, label='Spectral')
plt.plot(error_persist, label='Persistence')
plt.plot(error_AR, label=lab)
plt.legend()
p2.show()
plt.xlabel('Time')
plt.ylabel('Error')
p2.savefig(pp, format='pdf')
error_AR_mean = np.mean(error_AR_data, 0)
error_AR_std = np.std(error_AR_data, 0)
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
p3 = plt.figure()
if ymax and ymin: plt.ylim(ymin, ymax)
if haveSpectral:
plt.plot(error_spec_mean, label='Spectral', color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_spec_mean-error_spec_std, error_spec_mean+error_spec_std, alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
linewidth=1, antialiased=True)
plt.plot(error_persist_mean, label='Persistence', color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,T-1), error_persist_mean-error_persist_std, error_persist_mean+error_persist_std, alpha=0.2, edgecolor='#CC1B2A', facecolor='#FF0800',
linewidth=1, antialiased=True)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
plt.ylim(0, 20)
plt.plot(error_AR_mean, label='AR(3)', color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,T), error_AR_mean-error_AR_std, error_AR_mean+error_AR_std, alpha=0.2, edgecolor=cAR1, facecolor=bAR1,
linewidth=1, antialiased=True)
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
p3.savefig(pp, format='pdf')
pp.close()
print("See the output in " + filename)
def testIdentification2(T = 100, noRuns = 10, sChoices = [15,3,1], haveKalman = False, haveSpectral = True, G = np.matrix([[0.999,0],[0,0.5]]), F_dash = np.matrix([[1,1]]), sequenceLabel = ""):
if haveKalman: sChoices = sChoices + [T]
if len(sequenceLabel) > 0: sequenceLabel = " (" + sequenceLabel + ")"
if noRuns < 2:
print("Number of runs has to be larger than 1.")
exit()
filename = './outputs/AR.pdf'
pp = PdfPages(filename)
################# SYSTEM ###################
proc_noise_std = 0.5
obs_noise_std = 0.5
error_spec_data = None
error_persist_data = None
error_AR1_data = None
error_Kalman_data = None
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
inputs = np.zeros(T)
sys.solve([[1],[1]],inputs,T)
Y = [i[0,0] for i in sys.outputs]
#pdb.set_trace()
############################################
########## PRE-COMPUTE FILTER PARAMS ###################
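        # Standard Kalman-filter (Riccati) recursion used below:
        #   R prior state covariance, Q innovation covariance, A Kalman gain,
        #   C posterior covariance, Z = G (I - A F') closed-loop transition matrix.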
n = G.shape[0]
m = F_dash.shape[0]
W = proc_noise_std**2 * np.matrix(np.eye(n))
V = obs_noise_std**2 * np.matrix(np.eye(m))
#m_t = [np.matrix([[0],[0]])]
C = [np.matrix(np.eye(2))]
R = []
Q = []
A = []
Z = []
for t in range(T):
R.append(G * C[-1] * G.transpose() + W)
Q.append(F_dash * R[-1] * F_dash.transpose() + V)
A.append(R[-1]*F_dash.transpose()*np.linalg.inv(Q[-1]))
C.append(R[-1] - A[-1]*Q[-1]*A[-1].transpose() )
Z.append(G*( np.eye(2) - A[-1] * F_dash ))
#PREDICTION
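        # The loops below evaluate the truncated Kalman predictor
        #   Y_pred[t] = F' G A[t] y[t]
        #             + F' * sum_{j=0..min(t,s)} ( Z[t] Z[t-1] ... Z[t-j] ) G A[t-j-1] y[t-j-1],
        # so only the most recent s+1 observations enter the sum: s = T recovers the
        # full Kalman predictor, while a small s gives the AR(s+1)-style approximation
        # matching the plot labels.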
plt.plot(Y, label='Output', color='#000000', linewidth=2, antialiased = True)
for s in sChoices:
Y_pred=[]
for t in range(T):
Y_pred_term1 = F_dash * G * A[t] * sys.outputs[t]
if t==0:
Y_pred.append(Y_pred_term1)
continue
acc = 0
for j in range(min(t,s)+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
acc += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_pred.append(Y_pred_term1 + F_dash*acc)
#print(np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))]))
#print(lab)
if s == 1:
if error_AR1_data is None: error_AR1_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else:
#print(error_AR1_data.shape)
error_AR1_data = np.vstack((error_AR1_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
if s == T:
# For the spectral filtering etc, we use: loss = pow(np.linalg.norm(sys.outputs[t] - y_pred), 2)
if error_Kalman_data is None: error_Kalman_data = np.array([pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))])
else: error_Kalman_data = np.vstack((error_Kalman_data, [pow(np.linalg.norm(Y_pred[i][0,0] - Y[i]), 2) for i in range(len(Y))]))
plt.plot([i[0,0] for i in Y_pred], label="Kalman" + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, 200.0/255.0), linewidth=2, antialiased = True)
else:
plt.plot([i[0,0] for i in Y_pred], label='AR(%i)' % (s+1) + sequenceLabel, color=(42.0/255.0, 204.0 / 255.0, float(min(255.0,s))/255.0), linewidth=2, antialiased = True)
plt.xlabel('Time')
plt.ylabel('Prediction')
if haveSpectral:
predicted_output, M, error_spec, error_persist = wave_filtering_SISO_ftl(sys, T, 5)
plt.plot(predicted_output, label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
if error_spec_data is None: error_spec_data = error_spec
else: error_spec_data = np.vstack((error_spec_data, error_spec))
if error_persist_data is None: error_persist_data = error_persist
else: error_persist_data = np.vstack((error_persist_data, error_persist))
plt.legend()
plt.savefig(pp, format='pdf')
plt.close('all')
#plt.show()
if haveSpectral:
error_spec_mean = np.mean(error_spec_data, 0)
error_spec_std = np.std(error_spec_data, 0)
error_persist_mean = np.mean(error_persist_data, 0)
error_persist_std = np.std(error_persist_data, 0)
error_AR1_mean = np.mean(error_AR1_data, 0)
error_AR1_std = np.std(error_AR1_data, 0)
if haveKalman:
error_Kalman_mean = np.mean(error_Kalman_data, 0)
error_Kalman_std = np.std(error_Kalman_data, 0)
for (ylim, alphaValue) in [((0, 100.0), 0.2), ((0.0, 1.0), 0.05)]:
for Tlim in [T-1, min(T-1, 20)]:
#p3 = plt.figure()
p3, ax = plt.subplots()
plt.ylim(ylim)
if haveSpectral:
plt.plot(range(0,Tlim), error_spec[:Tlim], label='Spectral' + sequenceLabel, color='#1B2ACC', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_spec_mean-error_spec_std)[:Tlim], (error_spec_mean+error_spec_std)[:Tlim], alpha=alphaValue, edgecolor='#1B2ACC', facecolor='#089FFF', linewidth=1, antialiased=True)
plt.plot(range(0,Tlim), error_persist[:Tlim], label='Persistence' + sequenceLabel, color='#CC1B2A', linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_persist_mean-error_persist_std)[:Tlim], (error_persist_mean+error_persist_std)[:Tlim], alpha=alphaValue, edgecolor='#CC1B2A', facecolor='#FF0800', linewidth=1, antialiased=True)
#import matplotlib.transforms as mtransforms
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transData)
#trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
cAR1 = (42.0/255, 204.0 / 255.0, 1.0/255)
bAR1 = (1.0, 204.0 / 255.0, 0.0) # , alphaValue
print(cAR1)
print(bAR1)
#print(error_AR1_data)
#print(error_AR1_mean)
#print(Tlim)
plt.plot(error_AR1_mean[:Tlim], label='AR(2)' + sequenceLabel, color=cAR1, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_AR1_mean-error_AR1_std)[:Tlim], (error_AR1_mean+error_AR1_std)[:Tlim], alpha=alphaValue, edgecolor=cAR1, facecolor=bAR1, linewidth=1, antialiased=True) #transform=trans) #offset_position="data") alpha=alphaValue,
if haveKalman:
cK = (42.0/255.0, 204.0 / 255.0, 200.0/255.0)
bK = (1.0, 204.0 / 255.0, 200.0/255.0) # alphaValue
print(cK)
print(bK)
plt.plot(error_Kalman_mean[:Tlim], label='Kalman' + sequenceLabel, color=cK, linewidth=2, antialiased = True)
plt.fill_between(range(0,Tlim), (error_Kalman_mean-error_Kalman_std)[:Tlim], (error_Kalman_mean+error_Kalman_std)[:Tlim], alpha=alphaValue, facecolor=bK, edgecolor=cK, linewidth=1, antialiased=True) # transform = trans) #offset_position="data")
plt.legend()
plt.xlabel('Time')
plt.ylabel('Error')
#p3.show()
p3.savefig(pp, format='pdf')
pp.close()
# This is taken from pyplot documentation
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def testNoiseImpact(T = 50, noRuns = 10, discretisation = 10):
filename = './outputs/noise.pdf'
pp = PdfPages(filename)
for s in [1, 2, 3, 7]:
data = np.zeros((discretisation, discretisation))
diff = np.zeros((discretisation, discretisation))
ratio = np.zeros((discretisation, discretisation))
errKalman = np.zeros((discretisation, discretisation))
errAR = np.zeros((discretisation, discretisation))
################# SYSTEM ###################
G = np.matrix([[0.999,0],[0,0.5]])
F_dash = np.matrix([[1,1]])
for proc_noise_i in range(discretisation):
proc_noise_std = float(proc_noise_i + 1) / (discretisation - 1)
for obs_noise_i in range(discretisation):
obs_noise_std = float(obs_noise_i + 1) / (discretisation - 1)
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
inputs = np.zeros(T)
sys.solve([[1],[1]],inputs,T)
Y = [i[0,0] for i in sys.outputs]
#pdb.set_trace()
############################################
########## PRE-COMPUTE FILTER PARAMS ###################
n = G.shape[0]
m = F_dash.shape[0]
W = proc_noise_std**2 * np.matrix(np.eye(n))
V = obs_noise_std**2 * np.matrix(np.eye(m))
#m_t = [np.matrix([[0],[0]])]
C = [np.matrix(np.eye(2))]
R = []
Q = []
A = []
Z = []
for t in range(T):
R.append(G * C[-1] * G.transpose() + W)
Q.append(F_dash * R[-1] * F_dash.transpose() + V)
A.append(R[-1]*F_dash.transpose()*np.linalg.inv(Q[-1]))
C.append(R[-1] - A[-1]*Q[-1]*A[-1].transpose() )
#Z.append(G*( np.eye(2) - F_dash.transpose()*A[-1].transpose() ))
Z.append(G*( np.eye(2) - A[-1] * F_dash ))
#PREDICTION
Y_pred = []
Y_kalman = []
for t in range(T):
Y_pred_term1 = F_dash * G * A[t] * sys.outputs[t]
if t==0:
Y_pred.append(Y_pred_term1)
Y_kalman.append(Y_pred_term1)
continue
acc = 0
for j in range(min(t,s)+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
acc += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_pred.append(Y_pred_term1 + F_dash*acc)
accKalman = 0
for j in range(t+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
accKalman += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_kalman.append(Y_pred_term1 + F_dash*accKalman)
data[proc_noise_i][obs_noise_i] += np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))])
diffHere = np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))])
#print(Y_kalman[0][0,0])
diffHere -= np.linalg.norm([Y_kalman[i][0,0] - Y[i] for i in range(min(len(Y),len(Y_kalman)))])
#print(diffHere)
diff[proc_noise_i][obs_noise_i] += diffHere
#print(len(Y))
#print(len(Y_kalman))
errKalman[proc_noise_i][obs_noise_i] += pow(np.linalg.norm([Y_kalman[i][0,0] - Y[i] for i in range(min(len(Y),len(Y_kalman)))]), 2)
errAR[proc_noise_i][obs_noise_i] += pow(np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(len(Y))]), 2)
data = data / noRuns
fig, ax = plt.subplots()
tickLabels = [str(float(i+1) / 10) for i in range(11)]
im, cbar = heatmap(data, tickLabels, tickLabels, ax=ax, cmap="YlGn", cbarlabel="Avg. RMSE of AR(%i), %s runs" % (s+1, noRuns))
plt.ylabel('Variance of process noise')
plt.xlabel('Variance of observation noise')
fig.tight_layout()
plt.savefig(pp, format='pdf')
#plt.show()
diff = diff / noRuns
fig, ax = plt.subplots()
tickLabels = [str(float(i+1) / 10) for i in range(11)]
im, cbar = heatmap(diff, tickLabels, tickLabels, ax=ax, cmap="YlOrRd", cbarlabel="Avg. diff. in RMSEs of AR(%i) and Kalman filter, %s runs" % (s+1, noRuns))
plt.ylabel('Variance of process noise')
plt.xlabel('Variance of observation noise')
fig.tight_layout()
plt.savefig(pp, format='pdf')
#plt.show()
ratio = pow(errKalman / errAR, 2)
fig, ax = plt.subplots()
tickLabels = [str(float(i+1) / 10) for i in range(11)]
im, cbar = heatmap(ratio, tickLabels, tickLabels, ax=ax, cmap="PuBu", cbarlabel="Ratios of agg. errors of Kalman and AR(%i), %s runs" % (s+1, noRuns))
plt.ylabel('Variance of process noise')
plt.xlabel('Variance of observation noise')
fig.tight_layout()
plt.savefig(pp, format='pdf')
pp.close()
def testImpactOfS(T = 200, noRuns = 100, sMax = 15):
if sMax > T:
print("The number of s to test must be less than the horizon T.")
exit()
filename = './outputs/impacts.pdf'
pp = PdfPages(filename)
for (proc_noise_std, obs_noise_std, linestyle) in [ (0.1, 0.1, "dotted"), (0.1, 1.0, "dashdot"), (1.0, 0.1, "dashed"), (1.0, 1.0, "solid") ]:
errAR = np.zeros((sMax+1, noRuns))
################# SYSTEM ###################
G = np.matrix([[0.999,0],[0,0.5]])
F_dash = np.matrix([[1,1]])
for s in range(1, sMax):
for runNo in range(noRuns):
sys = dynamical_system(G,np.zeros((2,1)),F_dash,np.zeros((1,1)),
process_noise='gaussian',
observation_noise='gaussian',
process_noise_std=proc_noise_std,
observation_noise_std=obs_noise_std,
timevarying_multiplier_b = None)
inputs = np.zeros(T)
sys.solve([[1],[1]],inputs,T)
Y = [i[0,0] for i in sys.outputs]
#pdb.set_trace()
############################################
########## PRE-COMPUTE FILTER PARAMS ###################
n = G.shape[0]
m = F_dash.shape[0]
W = proc_noise_std**2 * np.matrix(np.eye(n))
V = obs_noise_std**2 * np.matrix(np.eye(m))
#m_t = [np.matrix([[0],[0]])]
C = [np.matrix(np.eye(2))]
R = []
Q = []
A = []
Z = []
for t in range(T):
R.append(G * C[-1] * G.transpose() + W)
Q.append(F_dash * R[-1] * F_dash.transpose() + V)
A.append(R[-1]*F_dash.transpose()*np.linalg.inv(Q[-1]))
C.append(R[-1] - A[-1]*Q[-1]*A[-1].transpose() )
#Z.append(G*( np.eye(2) - F_dash.transpose()*A[-1].transpose() ))
Z.append(G*( np.eye(2) - A[-1] * F_dash ))
#PREDICTION
Y_pred = []
for t in range(T):
Y_pred_term1 = F_dash * G * A[t] * sys.outputs[t]
if t==0:
Y_pred.append(Y_pred_term1)
continue
acc = 0
for j in range(min(t,s)+1):
for i in range(j+1):
if i==0:
ZZ=Z[t-i]
continue
ZZ = ZZ*Z[t-i]
acc += ZZ * G * A[t-j-1] * Y[t-j-1]
Y_pred.append(Y_pred_term1 + F_dash*acc)
errAR[s][runNo] = pow(np.linalg.norm([Y_pred[i][0,0] - Y[i] for i in range(min(len(Y), len(Y_pred)))]), 2) / T
error_AR1_mean = np.mean(errAR, 1)
error_AR1_std = np.std(errAR, 1)
print(len(error_AR1_mean))
alphaValue = 0.2
cAR1 = (proc_noise_std, obs_noise_std, 1.0/255)
#plt.plot(range(1, sMax), error_AR1_mean[1:], label='AR(2)', color=cAR1, linewidth=2, antialiased = True)
#plt.fill_between(range(1, sMax), (error_AR1_mean-error_AR1_std)[1:], (error_AR1_mean+error_AR1_std)[1:], alpha=alphaValue, edgecolor=cAR1, linewidth=2, antialiased=True) #transform=trans) #offset_position="data") alpha=alphaValue,
lab = "W = %.2f, V = %.2f" % (proc_noise_std, obs_noise_std)
plt.plot(range(sMax+1)[1:-1], error_AR1_mean[1:-1], color=cAR1, linewidth=2, antialiased = True, label = lab, linestyle= linestyle)
plt.fill_between(range(sMax+1)[1:-1], (error_AR1_mean-error_AR1_std)[1:-1], (error_AR1_mean+error_AR1_std)[1:-1], alpha=alphaValue, facecolor = cAR1, edgecolor=cAR1, linewidth=2, antialiased=True) #transform=trans) #offset_position="data") alpha=alphaValue,
plt.xlabel('Number s of auto-regressive terms, past the first one')
plt.ylabel('Avg. error of AR(s), %i runs' % noRuns )
plt.ylim(0, 1.5)
plt.legend()
plt.savefig(pp, format='pdf')
pp.close()
def testSeqD0(noRuns = 100):
plain = False
lr = True
if plain:
ts = time_series(matlabfile = './OARIMA_code_data/data/setting6.mat', varname="seq_d0")
T = len(ts.outputs)
testIdentification(ts, "seq0-complete", noRuns, T, 5, sequenceLabel = "seq_d0", haveSpectral = False)
T = min(20000, len(ts.outputs))
testIdentification(ts, "seq0-20000", noRuns, T, 5, sequenceLabel = "seq_d0", haveSpectral = False)
T = min(2000, len(ts.outputs))
testIdentification(ts, "seq0-2000", noRuns, T, 5, sequenceLabel = "seq_d0", haveSpectral = False)
T = min(200, len(ts.outputs))
testIdentification(ts, "seq0-200", noRuns, T, 5, sequenceLabel = "seq_d0", haveSpectral = False)
T = min(100, len(ts.outputs))
testIdentification(ts, "seq0-short-k5", 1, T, 5, sequenceLabel = "seq_d0")
#testIdentification(ts, "seq0-short-k50", 1, T, 50, 27, 37, sequenceLabel = "seq_d0")
#testIdentification(ts, "seq0-short-k5", 1, T, 5, sequenceLabel = "seq_d0")
#testIdentification(ts, "seq0-short-k50", 1, T, 50, sequenceLabel = "seq_d0")
if lr:
ts = time_series(matlabfile = './OARIMA_code_data/data/setting6.mat', varname="seq_d0")
ts.logratio()
T = len(ts.outputs) # has to go after the log-ratio truncation by one
testIdentification(ts, "logratio-complete", noRuns, T, 5, sequenceLabel = "lr_d0", haveSpectral = False)
T = min(20000, len(ts.outputs))
testIdentification(ts, "logratio-20000", noRuns, T, 5, sequenceLabel = "lr_d0", haveSpectral = False)
T = min(2000, len(ts.outputs))
testIdentification(ts, "logratio-2000", noRuns, T, 5, sequenceLabel = "lr_d0", haveSpectral = False)
T = min(200, len(ts.outputs))
testIdentification(ts, "logratio-200", noRuns, T, 5, sequenceLabel = "lr_d0", haveSpectral = False)
T = min(100, len(ts.outputs))
testIdentification(ts, "logratio-short-k5", noRuns, T, 5, sequenceLabel = "lr_d0")
def test_AR():
ts = time_series(matlabfile = './OARIMA_code_data/data/setting6.mat', varname="seq_d0")
T = min(100, len(ts.outputs))
s=10
D=10.
theta = [0 for i in range(s)]
for t in range(s,T):
eta = pow(float(t),-0.5)
Y = ts.outputs[t]
loss = cost_AR(theta, Y, list(reversed(ts.outputs[t-s:t])))
grad = gradient_AR(theta, Y, list(reversed(ts.outputs[t-s:t])))
print("Loss: at time step %d :" % (t), loss)
theta = [theta[i] -eta*grad[i] for i in range(len(theta))] #gradient step
norm_theta = np.linalg.norm(theta)
if norm_theta>D: theta = [D*i/norm_theta for i in theta] #projection step
version = "FinalAAAI"
version = "Working"
version = "Extended"
if __name__ == '__main__':
try:
close_all_figs()
if version == "Extended":
# The following calls adds the plots for the extended version
testSeqD0()
if version == "FinalAAAI":
# These calls produce the AAAI 2019 figures (8-page version)
testIdentification2(500, noRuns = 100, sChoices = [1], haveKalman = True, haveSpectral = True)
testNoiseImpact()
testImpactOfS()
if version == "Working":
# These calls produce illuminating plots, which did not make it into the final 8-page version of the paper.
None
#testIdentification2(T = 100, noRuns = 10, haveSpectral = True)
#testIdentification2(200, 10, haveSpectral = False)
#timeSeqD0()
#testSisoInvariantShort(100)
#testIdentification2(100)
#testSeqD0()
#timeSeqD0()
#testSeqD1()
#testSeqD2()
#testSisoInvariantLong()
#testSYSID()
#gradient_AR_test(0)
#test_AR()
#transition = np.matrix([[1.,-0.8],[-.6,.3]])
#observation = np.matrix([[1.0,1.0]])
#testIdentification2(20, noRuns = 100, sChoices = [1], haveKalman = True, haveSpectral = True, G = transition, F_dash = observation)
except (KeyboardInterrupt, SystemExit):
raise
except:
print(" Error: ")
print(traceback.format_exc())
``` |
{
"source": "jmares/beyond_basic_stuff_python",
"score": 4
} |
#### File: beyond_basic_stuff_python/ch14/towerofhanoi.py
```python
import copy
import sys
TOTAL_DISKS = 5 # More disks means a more difficult puzzle
# Start with all disks on tower A:
SOLVED_TOWER = list(range(TOTAL_DISKS, 0, -1))
def main():
"""Runs a single game of The Tower of Hanoi."""
print("""THE TOWER OF HANOI, by <NAME> <EMAIL>
Move the tower of disks, one disk at a time, to another tower. Larger
disks cannot rest on top of a smaller disk.
More info at https://en.wikipedia.org/wiki/Tower_of_Hanoi
""")
"""The towers dictionary has keys "A", "B", and "C" and values
that are lists representing a tower of disks. The list contains
integers representing disks of different sizes, and the start of
the list is the bottom of the tower. For a game with 5 disks,
the list [5, 4, 3, 2, 1] represents a completed tower. The blank
list [] represents a tower of no disks. The list [1, 3] has a
larger disk on top of a smaller disk and is an invalid
configuration. The list [3, 1] is allowed since smaller disks
can go on top of larger ones."""
towers = {"A": copy.copy(SOLVED_TOWER), "B": [], "C": []}
while True: # Run a single turn on each iteration of this loop
# Display the towers and disks:
display_towers(towers)
# Ask the user for a move
from_tower, to_tower = get_player_move(towers)
# Move the top disk from from_tower to to_tower
disk = towers[from_tower].pop()
towers[to_tower].append(disk)
# Check if the user has solved the puzzle:
if SOLVED_TOWER in (towers["B"], towers["C"]):
display_towers(towers) # Display the towers one last time
print("You have solved the puzzle! Well done!")
sys.exit()
def get_player_move(towers):
"""Asks the player for a move. Returns (fromTower, toTower)."""
while True: # Keep asking player until they enter a valid move.
print('Enter the letters of "from" and "to" towers, or QUIT.')
print("(e.g., AB to moves a disk from tower A to tower B.)")
print()
response = input("> ").upper().strip()
if response == "QUIT":
print("Thanks for playing!")
sys.exit()
# Make sure the user entered valid tower letters:
if response not in ("AB", "AC", "BA", "BC", "CA", "CB"):
print("Enter one of AB, AC, BA, BC, CA, or CB.")
continue # Ask player again for their move.
# Use more descriptive variable names:
from_tower, to_tower = response[0], response[1]
if len(towers[from_tower]) == 0:
# The "from" tower cannot be an empty tower:
print("You selected a tower with no disks.")
continue # Ask player again for their move.
elif len(towers[to_tower]) == 0:
# Any disk can be moved onto an empty "to" tower:
return from_tower, to_tower
elif towers[to_tower][-1] < towers[from_tower][-1]:
print("Can't put larger disks on top of smaller ones.")
continue # Ask player again for their move.
else:
# This is a valid move, so return the selected towers:
return from_tower, to_tower
def display_towers(towers):
"""Display the three towers with their disks."""
# Display the three towers:
for level in range(TOTAL_DISKS, -1, -1):
for tower in (towers["A"], towers["B"], towers["C"]):
if level >= len(tower):
display_disk(0) # Display the bare pole with no disk
else:
display_disk(tower[level]) # Display the disk
print()
# Display the tower labels A, B and C:
empty_space = " " * (TOTAL_DISKS)
print("{0} A{0}{0} B{0}{0} C\n".format(empty_space))
def display_disk(width):
"""Display a disk of the given width. A width of 0 means no disk."""
empty_space = " " * (TOTAL_DISKS - width)
if width == 0:
# Display a pole segment without a disk:
print(f"{empty_space}||{empty_space}", end="")
else:
# Display the disk:
disk = "@" * width
numLabel = str(width).rjust(2, "_")
print(f"{empty_space}{disk}{numLabel}{disk}{empty_space}", end="")
# If this program was run (instead of imported), run the game:
if __name__ == "__main__":
main()
``` |
{
"source": "jmares/tweeps",
"score": 4
} |
#### File: jmares/tweeps/create_db.py
```python
import sqlite3
from sqlite3 import Error
from config import DB_FILE
def create_dbconnection(db_file):
"""
Create a database connection to the SQLite database specified by db_file
Parameters:
db_file (str): path to database file
Returns:
SQLite3 connection or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return conn
def create_table(conn, ddl_sql):
"""Create a table from a given DDL statement
Parameters:
conn: SQLite3 Connection
ddl_sql (str): a CREATE TABLE statement
Returns:
boolean
"""
try:
c = conn.cursor()
c.execute(ddl_sql)
except Error as e:
print(e)
return False
return True
def main():
"""Create database with two tables for the Tweeps project"""
sql_create_followers = """CREATE TABLE IF NOT EXISTS followers(
twitter_id INTEGER PRIMARY KEY,
screen_name TEXT UNIQUE,
start_date TEXT NOT NULL,
last_date TEXT NOT NULL
); """
sql_create_friends = """CREATE TABLE IF NOT EXISTS friends(
twitter_id INTEGER PRIMARY KEY,
screen_name TEXT UNIQUE,
start_date TEXT NOT NULL,
last_date TEXT NOT NULL
); """
# create a database connection
conn = create_dbconnection(DB_FILE)
# create tables
if conn is not None:
create_table(conn, sql_create_followers)
create_table(conn, sql_create_friends)
else:
print("Error! cannot create the database connection.")
# If this program was run (instead of imported), run:
if __name__ == '__main__':
main()
``` |
{
"source": "jmarfr/netatmo",
"score": 3
} |
#### File: netatmo/netatmo/__init__.py
```python
import time
import requests
import logging
from http.client import HTTPConnection
# log = logging.getLogger("urllib3")
# log.setLevel(logging.DEBUG)
#
# HTTPConnection.debuglevel = 1
# TODO: Save token locally to reuse it. save/load methods needed
class Netatmo:
def __init__(self, client_id, client_secret, username, password, scope=None):
self._client_id = client_id
self._client_secret = client_secret
self._username = username
self._password = password
self.scope = scope
self.token_expiration_time = None
self._access_token = None
self._refresh_token = None
self.base_url = "https://api.netatmo.com"
self._get_token()
def _get_token(self):
"""Get oauth token from Netatmo API."""
if self._access_token and self.token_expiration_time > time.time():
return self._access_token
if self.token_expiration_time is not None and \
self.token_expiration_time < time.time():
# Need to renew token
data = {
"grant_type": "refresh_token",
"refresh_token": self._refresh_token,
"client_id": self._client_id,
"client_secret": self._client_secret
}
else:
data = {
"grant_type": "password",
"client_id": self._client_id,
"client_secret": self._client_secret,
"username": self._username,
"password": <PASSWORD>
}
if self.scope is not None:
data["scope"] = self.scope
r = requests.post(f"{self.base_url}/oauth2/token", data=data)
r.raise_for_status()
json_resp = r.json()
self._access_token = json_resp['access_token']
self._refresh_token = json_resp['refresh_token']
self.token_expiration_time = json_resp['expire_in'] + time.time()
return self._access_token
# TODO add GET/POST method here
``` |
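The `Netatmo` class above fetches its OAuth token in the constructor and caches it until `token_expiration_time`, after which `_get_token()` falls back to the refresh-token grant. A minimal usage sketch; every credential value below is a placeholder, not something taken from the repository:
```python
from netatmo import Netatmo

# Placeholder credentials -- supply real Netatmo app/user values in practice.
client = Netatmo(
    client_id="my-client-id",
    client_secret="my-client-secret",
    username="user@example.com",
    password="not-a-real-password",
    scope="read_station",  # optional; forwarded to the token request when set
)

# The constructor already called _get_token(); calling it again simply returns
# the cached token until it expires, then refreshes it transparently.
token = client._get_token()
print(token[:8], "...")
```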
{
"source": "jmargeta/awesome-pytorch-hub",
"score": 2
} |
#### File: jmargeta/awesome-pytorch-hub/hubconf.py
```python
dependencies = ['mxnet', 'torch', 'torchvision', 'pretrainedmodels', 'efficientnet_pytorch']
from efficientnet_pytorch import EfficientNet
from gluoncv.model_zoo import mask_rcnn_fpn_resnet101_v1d_coco as mask_rcnn_fpn_resnet101_v1d_coco_mxnet
from pretrainedmodels.models import pnasnet5large
from torchvision.models import mobilenet_v2, shufflenet_v2_x1_0
from mxnet.gluon.model_zoo.vision import mobilenet_v2_1_0 as mobilenet_v2_1_0_mxnet
# See also:
# https://paperswithcode.com/sota/image-classification-on-imagenet
# https://github.com/Cadene/pretrained-models.pytorch
# https://github.com/osmr/imgclsmob
# https://gluon-cv.mxnet.io/model_zoo/index.html
def object_recognition(pretrained=False, **kwargs):
"""Image recognition model balancing good performance and model size"""
model_name = kwargs.pop('model_name', 'efficientnet-b2')
if pretrained is True:
model = EfficientNet.from_pretrained(model_name, **kwargs)
else:
model = EfficientNet.from_name(model_name, **kwargs)
return model
def object_recognition_sota(pretrained=False, **kwargs):
"""State of the art object recognition model"""
num_classes = 1000
if pretrained is True:
pretrained_dataset = 'imagenet'
model = pnasnet5large(pretrained='imagenet', num_classes=num_classes, **kwargs)
else:
model = pnasnet5large(num_classes=num_classes, **kwargs)
return model
def object_recognition_faster(pretrained=False, **kwargs):
"""Faster image recognition suitable for mobile devices"""
return shufflenet_v2_x1_0(pretrained=pretrained, **kwargs)
def object_recognition_faster_mxnet(pretrained=False, **kwargs):
"""Faster object recognition with mxnet mobilenet_v2_1_0 backend"""
return mobilenet_v2_1_0_mxnet(pretrained=pretrained, **kwargs)
def instance_segmentation_mxnet(pretrained=False, **kwargs):
"""Instance segmentation with mxnet mobilenet_v2_1_0 backend"""
return mask_rcnn_fpn_resnet101_v1d_coco_mxnet(pretrained=pretrained, **kwargs)
``` |
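Because this file is a `hubconf.py`, the functions above double as `torch.hub` entry points. A minimal sketch of loading one of them; the `jmargeta/awesome-pytorch-hub` repo string is taken from the entry's metadata and may not match the actually published GitHub path:
```python
import torch

# Fetch the default EfficientNet-based recognizer with pretrained ImageNet weights.
model = torch.hub.load("jmargeta/awesome-pytorch-hub", "object_recognition", pretrained=True)
model.eval()

# Run a forward pass on a dummy batch of 3-channel images.
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 1000]) for the ImageNet head
```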
{
"source": "jmargeta/data-science-types",
"score": 3
} |
#### File: data-science-types/tools/code_template.py
```python
from pathlib import Path
import re
from typing import Optional, Dict, Union, List
ReplacementType = Union[str, int, List[Union[str, int]]]
class CodeTemplate:
"""Match ___identifier___ and replace with value in env
If this identifier is at the beginning of whitespace on a line and its value is a list then it
is treated as block subsitution by indenting to that depth and putting each element of the list
on its own line. If the identifier is on a line starting with non-whitespace and a list
then it is comma separated. ___foo___, will insert a comma after the list, if the list is not
empty.
"""
# Python 2.7.5 has a bug where the leading (^[^\n\S]*)? does not work,
# workaround via appending another [^\n\S]? inside
substitution_str = r"(^[^\n\S]*[^\n\S]?)?___([^\d\W]\w*)___(\,?)"
# older versions of Python have a bug where \w* does not work,
# so we need to replace with the non-shortened version [a-zA-Z0-9_]*
# https://bugs.python.org/issue18647
substitution_str = substitution_str.replace(r"\w", r"[a-zA-Z0-9_]")
subtitution = re.compile(substitution_str, re.MULTILINE)
@classmethod
def from_file(cls, filename: Path) -> "CodeTemplate":
with filename.open("r") as f:
return cls(f.read())
def __init__(self, pattern: str):
self.pattern = pattern
@staticmethod
def indent_lines(indent: str, v: List[Union[str, int]], after: str) -> str:
return "\n".join([indent + l + after for e in v for l in str(e).splitlines()]) # .rstrip()
def substitute(
self, env_: Optional[Dict[str, ReplacementType]] = None, **kwargs: ReplacementType
) -> str:
env = env_ or {}
def replace(match: "re.Match") -> str:
indent = match.group(1)
key = match.group(2)
trailing_comma = match.group(3)
# lookup
v = kwargs[key] if key in kwargs else env[key]
if indent is not None:
if not isinstance(v, list):
v = [v]
return self.indent_lines(indent, v, trailing_comma.rstrip())
elif isinstance(v, list):
middle = ", ".join([str(x) for x in v])
if len(v) == 0:
return middle
return middle + trailing_comma
else:
return str(v)
return self.subtitution.sub(replace, self.pattern)
if __name__ == "__main__":
pattern = """\
def plot(___text_args___, ___label_args___)
def bar(
___text_args___
___label_args___
)
def scatter(
___text_args___,
___label_args___,
)
"""
c = CodeTemplate(pattern)
print(
c.substitute(
text_args=["fontsize: int", "tickness: float"],
label_args=["size: float", "color: str"],
)
)
``` |
{
"source": "jmargeta/pendulum",
"score": 2
} |
#### File: jmargeta/pendulum/build.py
```python
import os
import sys
from distutils.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import CCompilerError
from distutils.errors import DistutilsExecError
from distutils.errors import DistutilsPlatformError
# C Extensions
with_extensions = os.getenv("PENDULUM_EXTENSIONS", None)
if with_extensions == "1" or with_extensions is None:
with_extensions = True
if with_extensions == "0" or hasattr(sys, "pypy_version_info"):
with_extensions = False
extensions = []
if with_extensions:
extensions = [
Extension("pendulum._extensions._helpers", ["pendulum/_extensions/_helpers.c"]),
Extension("pendulum.parsing._iso8601", ["pendulum/parsing/_iso8601.c"]),
]
class BuildFailed(Exception):
pass
class ExtBuilder(build_ext):
# This class allows C extension building to fail.
def run(self):
try:
build_ext.run(self)
except (DistutilsPlatformError, FileNotFoundError):
print("************************************************************")
print("Cannot compile C accelerator module, use pure python version")
print("************************************************************")
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError):
print("************************************************************")
print("Cannot compile C accelerator module, use pure python version")
print("************************************************************")
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{"ext_modules": extensions, "cmdclass": {"build_ext": ExtBuilder}}
)
```
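The `build()` hook above follows the Poetry-style build-script convention: the packaging front end imports this module and passes in the keyword arguments it is about to hand to `setup()`. A rough sketch of such a caller, with illustrative `setup_kwargs` that are not taken from pendulum's real packaging configuration:
```python
from setuptools import setup
from build import build  # the build.py shown above

# Illustrative metadata only; the real values come from pyproject.toml.
setup_kwargs = {"name": "pendulum", "packages": ["pendulum"]}

# build() mutates the dict in place, adding ext_modules plus the ExtBuilder
# cmdclass so a failed C compilation degrades to the pure-Python fallback.
build(setup_kwargs)
setup(**setup_kwargs)
```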
#### File: tests/datetime/test_sub.py
```python
from datetime import timedelta
import pendulum
import pytest
from ..conftest import assert_datetime
def test_sub_years_positive():
assert pendulum.datetime(1975, 1, 1).subtract(years=1).year == 1974
def test_sub_years_zero():
assert pendulum.datetime(1975, 1, 1).subtract(years=0).year == 1975
def test_sub_years_negative():
assert pendulum.datetime(1975, 1, 1).subtract(years=-1).year == 1976
def test_sub_months_positive():
assert pendulum.datetime(1975, 12, 1).subtract(months=1).month == 11
def test_sub_months_zero():
assert pendulum.datetime(1975, 12, 1).subtract(months=0).month == 12
def test_sub_months_negative():
assert pendulum.datetime(1975, 12, 1).subtract(months=-1).month == 1
def test_sub_days_positive():
assert pendulum.datetime(1975, 5, 31).subtract(days=1).day == 30
def test_sub_days_zero():
assert pendulum.datetime(1975, 5, 31).subtract(days=0).day == 31
def test_sub_days_negative():
assert pendulum.datetime(1975, 5, 31).subtract(days=-1).day == 1
def test_sub_weeks_positive():
assert pendulum.datetime(1975, 5, 21).subtract(weeks=1).day == 14
def test_sub_weeks_zero():
assert pendulum.datetime(1975, 5, 21).subtract(weeks=0).day == 21
def test_sub_weeks_negative():
assert pendulum.datetime(1975, 5, 21).subtract(weeks=-1).day == 28
def test_sub_hours_positive():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(hours=1).hour == 23
def test_sub_hours_zero():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(hours=0).hour == 0
def test_sub_hours_negative():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(hours=-1).hour == 1
def test_sub_minutes_positive():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(minutes=1).minute == 59
def test_sub_minutes_zero():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(minutes=0).minute == 0
def test_sub_minutes_negative():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(minutes=-1).minute == 1
def test_sub_seconds_positive():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(seconds=1).second == 59
def test_sub_seconds_zero():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(seconds=0).second == 0
def test_sub_seconds_negative():
assert pendulum.datetime(1975, 5, 21, 0, 0, 0).subtract(seconds=-1).second == 1
def test_subtract_timedelta():
delta = timedelta(days=6, seconds=16, microseconds=654321)
d = pendulum.datetime(2015, 3, 14, 3, 12, 15, 777777)
d = d - delta
assert d.day == 8
assert d.minute == 11
assert d.second == 59
assert d.microsecond == 123456
def test_subtract_duration():
duration = pendulum.duration(
years=2, months=3, days=6, seconds=16, microseconds=654321
)
d = pendulum.datetime(2015, 3, 14, 3, 12, 15, 777777)
d = d - duration
assert 2012 == d.year
assert 12 == d.month
assert 8 == d.day
assert 3 == d.hour
assert 11 == d.minute
assert 59 == d.second
assert 123456 == d.microsecond
def test_subtract_time_to_new_transition_skipped():
dt = pendulum.datetime(2013, 3, 31, 3, 0, 0, 0, tz="Europe/Paris")
assert_datetime(dt, 2013, 3, 31, 3, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 7200
assert dt.is_dst()
dt = dt.subtract(microseconds=1)
assert_datetime(dt, 2013, 3, 31, 1, 59, 59, 999999)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 3600
assert not dt.is_dst()
dt = pendulum.datetime(2013, 3, 10, 3, 0, 0, 0, tz="America/New_York")
assert_datetime(dt, 2013, 3, 10, 3, 0, 0, 0)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -4 * 3600
assert dt.is_dst()
dt = dt.subtract(microseconds=1)
assert_datetime(dt, 2013, 3, 10, 1, 59, 59, 999999)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -5 * 3600
assert not dt.is_dst()
dt = pendulum.datetime(1957, 4, 28, 3, 0, 0, 0, tz="America/New_York")
assert_datetime(dt, 1957, 4, 28, 3, 0, 0, 0)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -4 * 3600
assert dt.is_dst()
dt = dt.subtract(microseconds=1)
assert_datetime(dt, 1957, 4, 28, 1, 59, 59, 999999)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -5 * 3600
assert not dt.is_dst()
def test_subtract_time_to_new_transition_skipped_big():
dt = pendulum.datetime(2013, 3, 31, 3, 0, 0, 0, tz="Europe/Paris")
assert_datetime(dt, 2013, 3, 31, 3, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 7200
assert dt.is_dst()
dt = dt.subtract(days=1)
assert_datetime(dt, 2013, 3, 30, 3, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 3600
assert not dt.is_dst()
def test_subtract_time_to_new_transition_repeated():
dt = pendulum.datetime(2013, 10, 27, 2, 0, 0, 0, tz="Europe/Paris")
assert_datetime(dt, 2013, 10, 27, 2, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 3600
assert not dt.is_dst()
dt = dt.subtract(microseconds=1)
assert_datetime(dt, 2013, 10, 27, 2, 59, 59, 999999)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 7200
assert dt.is_dst()
dt = pendulum.datetime(2013, 11, 3, 1, 0, 0, 0, tz="America/New_York")
assert_datetime(dt, 2013, 11, 3, 1, 0, 0, 0)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -5 * 3600
assert not dt.is_dst()
dt = dt.subtract(microseconds=1)
assert_datetime(dt, 2013, 11, 3, 1, 59, 59, 999999)
assert dt.timezone_name == "America/New_York"
assert dt.offset == -4 * 3600
assert dt.is_dst()
def test_subtract_time_to_new_transition_repeated_big():
dt = pendulum.datetime(2013, 10, 27, 2, 0, 0, 0, tz="Europe/Paris")
assert_datetime(dt, 2013, 10, 27, 2, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 3600
assert not dt.is_dst()
dt = dt.subtract(days=1)
assert_datetime(dt, 2013, 10, 26, 2, 0, 0, 0)
assert dt.timezone_name == "Europe/Paris"
assert dt.offset == 7200
assert dt.is_dst()
def test_subtract_invalid_type():
d = pendulum.datetime(1975, 5, 21, 0, 0, 0)
with pytest.raises(TypeError):
d - "ab"
with pytest.raises(TypeError):
"ab" - d
``` |
{
"source": "jmargeta/structlog",
"score": 2
} |
#### File: src/structlog/twisted.py
```python
import json
import sys
from typing import Any, Callable, Dict, Optional, Sequence, TextIO, Tuple
from twisted.python import log
from twisted.python.failure import Failure
from twisted.python.log import ILogObserver, textFromEventDict
from zope.interface import implementer
from ._base import BoundLoggerBase
from ._config import _BUILTIN_DEFAULT_PROCESSORS
from ._utils import until_not_interrupted
from .processors import JSONRenderer as GenericJSONRenderer
from .types import EventDict, WrappedLogger
class BoundLogger(BoundLoggerBase):
"""
Twisted-specific version of `structlog.BoundLogger`.
Works exactly like the generic one except that it takes advantage of
knowing the logging methods in advance.
Use it like::
configure(
wrapper_class=structlog.twisted.BoundLogger,
)
"""
def msg(self, event: Optional[str] = None, **kw: Any) -> Any:
"""
Process event and call ``log.msg()`` with the result.
"""
return self._proxy_to_logger("msg", event, **kw)
def err(self, event: Optional[str] = None, **kw: Any) -> Any:
"""
Process event and call ``log.err()`` with the result.
"""
return self._proxy_to_logger("err", event, **kw)
class LoggerFactory:
"""
Build a Twisted logger when an *instance* is called.
>>> from structlog import configure
>>> from structlog.twisted import LoggerFactory
>>> configure(logger_factory=LoggerFactory())
"""
def __call__(self, *args: Any) -> WrappedLogger:
"""
Positional arguments are silently ignored.
:rvalue: A new Twisted logger.
.. versionchanged:: 0.4.0
Added support for optional positional arguments.
"""
return log
_FAIL_TYPES = (BaseException, Failure)
def _extractStuffAndWhy(eventDict: EventDict) -> Tuple[Any, Any, EventDict]:
"""
Removes all possible *_why*s and *_stuff*s, analyzes exc_info and returns
a tuple of ``(_stuff, _why, eventDict)``.
**Modifies** *eventDict*!
"""
_stuff = eventDict.pop("_stuff", None)
_why = eventDict.pop("_why", None)
event = eventDict.pop("event", None)
if isinstance(_stuff, _FAIL_TYPES) and isinstance( # type: ignore
event, _FAIL_TYPES
):
raise ValueError("Both _stuff and event contain an Exception/Failure.")
# `log.err('event', _why='alsoEvent')` is ambiguous.
if _why and isinstance(event, str): # type: ignore
raise ValueError("Both `_why` and `event` supplied.")
# Two failures are ambiguous too.
if not isinstance(_stuff, _FAIL_TYPES) and isinstance(event, _FAIL_TYPES):
_why = _why or "error"
_stuff = event
if isinstance(event, str):
_why = event
if not _stuff and sys.exc_info() != (None, None, None):
_stuff = Failure()
# Either we used the error ourselves or the user supplied one for
# formatting. Avoid log.err() to dump another traceback into the log.
if isinstance(_stuff, BaseException) and not isinstance(_stuff, Failure):
_stuff = Failure(_stuff)
return _stuff, _why, eventDict
class ReprWrapper:
"""
Wrap a string and return it as the ``__repr__``.
This is needed for ``twisted.python.log.err`` that calls `repr` on
``_stuff``:
>>> repr("foo")
"'foo'"
>>> repr(ReprWrapper("foo"))
'foo'
Note the extra quotes in the unwrapped example.
"""
def __init__(self, string: str) -> None:
self.string = string
def __eq__(self, other: Any) -> bool:
"""
Check for equality, just for tests.
"""
return (
isinstance(other, self.__class__) and self.string == other.string
)
def __repr__(self) -> str:
return self.string
class JSONRenderer(GenericJSONRenderer):
"""
Behaves like `structlog.processors.JSONRenderer` except that it
formats tracebacks and failures itself if called with ``err()``.
.. note::
This ultimately means that the messages get logged out using ``msg()``,
and *not* ``err()`` which renders failures in separate lines.
Therefore it will break your tests that contain assertions using
`flushLoggedErrors <https://twistedmatrix.com/documents/
current/api/twisted.trial.unittest.SynchronousTestCase.html
#flushLoggedErrors>`_.
*Not* an adapter like `EventAdapter` but a real formatter. Also does *not*
need to be adapted using one.
Use together with a `JSONLogObserverWrapper`-wrapped Twisted logger like
`plainJSONStdOutLogger` for pure-JSON logs.
"""
def __call__( # type: ignore
self,
logger: WrappedLogger,
name: str,
eventDict: EventDict,
) -> Tuple[Sequence[Any], Dict[str, Any]]:
_stuff, _why, eventDict = _extractStuffAndWhy(eventDict)
if name == "err":
eventDict["event"] = _why
if isinstance(_stuff, Failure):
eventDict["exception"] = _stuff.getTraceback(detail="verbose")
_stuff.cleanFailure()
else:
eventDict["event"] = _why
return (
(
ReprWrapper(
GenericJSONRenderer.__call__( # type: ignore
self, logger, name, eventDict
)
),
),
{"_structlog": True},
)
@implementer(ILogObserver)
class PlainFileLogObserver:
"""
Write only the plain message without timestamps or anything else.
Great to just print JSON to stdout where you catch it with something like
runit.
:param file: File to print to.
.. versionadded:: 0.2.0
"""
def __init__(self, file: TextIO) -> None:
self._write = file.write
self._flush = file.flush
def __call__(self, eventDict: EventDict) -> None:
until_not_interrupted(self._write, textFromEventDict(eventDict) + "\n")
until_not_interrupted(self._flush)
@implementer(ILogObserver)
class JSONLogObserverWrapper:
"""
Wrap a log *observer* and render non-`JSONRenderer` entries to JSON.
:param ILogObserver observer: Twisted log observer to wrap. For example
:class:`PlainFileObserver` or Twisted's stock `FileLogObserver
<https://twistedmatrix.com/documents/current/api/twisted.python.log.
FileLogObserver.html>`_
.. versionadded:: 0.2.0
"""
def __init__(self, observer: Any) -> None:
self._observer = observer
def __call__(self, eventDict: EventDict) -> str:
if "_structlog" not in eventDict:
eventDict["message"] = (
json.dumps(
{
"event": textFromEventDict(eventDict),
"system": eventDict.get("system"),
}
),
)
eventDict["_structlog"] = True
return self._observer(eventDict)
def plainJSONStdOutLogger() -> JSONLogObserverWrapper:
"""
Return a logger that writes only the message to stdout.
Transforms non-`JSONRenderer` messages to JSON.
Ideal for JSONifying log entries from Twisted plugins and libraries that
are outside of your control::
$ twistd -n --logger structlog.twisted.plainJSONStdOutLogger web
{"event": "Log opened.", "system": "-"}
{"event": "twistd 13.1.0 (python 2.7.3) starting up.", "system": "-"}
{"event": "reactor class: twisted...EPollReactor.", "system": "-"}
{"event": "Site starting on 8080", "system": "-"}
{"event": "Starting factory <twisted.web.server.Site ...>", ...}
...
Composes `PlainFileLogObserver` and `JSONLogObserverWrapper` to a usable
logger.
.. versionadded:: 0.2.0
"""
return JSONLogObserverWrapper(PlainFileLogObserver(sys.stdout))
class EventAdapter:
"""
Adapt an ``event_dict`` to Twisted logging system.
Particularly, make a wrapped `twisted.python.log.err
<https://twistedmatrix.com/documents/current/
api/twisted.python.log.html#err>`_ behave as expected.
:param dictRenderer: Renderer that is used for the actual log message.
Please note that structlog comes with a dedicated `JSONRenderer`.
**Must** be the last processor in the chain and requires a *dictRenderer*
for the actual formatting as an constructor argument in order to be able to
fully support the original behaviors of ``log.msg()`` and ``log.err()``.
"""
def __init__(
self,
dictRenderer: Optional[
Callable[[WrappedLogger, str, EventDict], str]
] = None,
) -> None:
"""
:param dictRenderer: A processor used to format the log message.
"""
self._dictRenderer = dictRenderer or _BUILTIN_DEFAULT_PROCESSORS[-1]
def __call__(
self, logger: WrappedLogger, name: str, eventDict: EventDict
) -> Any:
if name == "err":
# This aspires to handle the following cases correctly:
# - log.err(failure, _why='event', **kw)
# - log.err('event', **kw)
# - log.err(_stuff=failure, _why='event', **kw)
_stuff, _why, eventDict = _extractStuffAndWhy(eventDict)
eventDict["event"] = _why
return (
(),
{
"_stuff": _stuff,
"_why": self._dictRenderer(logger, name, eventDict),
},
)
else:
return self._dictRenderer(logger, name, eventDict)
``` |
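Tying the docstrings above together: `EventAdapter` goes last in the processor chain and `LoggerFactory` supplies the wrapped Twisted logger. A minimal configuration sketch based only on the classes defined in this module; a real application's processor chain will contain more steps:
```python
import structlog
from structlog.twisted import BoundLogger, EventAdapter, LoggerFactory

structlog.configure(
    processors=[EventAdapter()],      # must stay the last processor in the chain
    wrapper_class=BoundLogger,
    logger_factory=LoggerFactory(),
)

log = structlog.get_logger().bind(peer="127.0.0.1")
log.msg("connection_made")
try:
    1 / 0
except ZeroDivisionError:
    log.err("division_failed")  # the active exception is picked up as a Failure via sys.exc_info()
```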
{
"source": "jmargieh/hackathon",
"score": 3
} |
#### File: jmargieh/hackathon/hackathon.py
```python
import webapp2
import json
from events import Event
from users import User
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Whats up bitches?, remember we have a meeting on Sunday.\n see you then :D')
class BhadelPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Sho ya bhadel ?')
class EventHandler(webapp2.RequestHandler):  # renamed so it no longer shadows the imported Event model
def post(self):
self.response.headers.add_header("Access-Control-Allow-Origin", "*")
body = json.loads(self.request.body)
Event.createEvent(body)
self.response.write('OK')
class Register(webapp2.RequestHandler):
def post(self):
self.response.headers.add_header("Access-Control-Allow-Origin", "*")
body = json.loads(self.request.body)
User.registerUser(body)
self.response.write('OK')
app = webapp2.WSGIApplication([
('/', MainPage),
('/bhadel', BhadelPage),
('/createevent', EventHandler),
('/register', Register),
], debug=True)
``` |
{
"source": "jmargieh/head-pose-estimation",
"score": 3
} |
#### File: head-pose-estimation/calibrarion/cap_and_calib.py
```python
import numpy as np
import cv2
def calculate (objp, imgp, grayp):
print("calculating calibration result...")
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objp, imgp, grayp.shape[::-1], None, None)
# Compute mean of reprojection error
mean_error = 0
for i in range(len(objp)):  # use the function's own arguments rather than the module-level globals
imgpoints2, _ = cv2.projectPoints(objp[i], rvecs[i], tvecs[i], mtx, dist)
error = cv2.norm(imgp[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
mean_error += error
with open('calib_result.txt', 'a') as the_file:
the_file.write('Camera Matrix: \n')
the_file.write(str(np.concatenate(mtx, axis=0).tolist()) + '\n')
the_file.write('Distortion Coefficients: \n')
the_file.write(str(np.concatenate(dist, axis=0).tolist()) + '\n')
the_file.write('Rotation Vector: \n')
the_file.write(str(np.concatenate(rvecs, axis=0).tolist()) + '\n')
the_file.write('Translation Vector: \n')
the_file.write(str(np.concatenate(tvecs, axis=0).tolist()) + '\n')
the_file.write("Mean reprojection error:\n" + str(mean_error / len(objpoints)) + '\n')
the_file.write('--------------------------------')
# Global variables
cap = cv2.VideoCapture(0)
global_count = 0
font = cv2.FONT_HERSHEY_SIMPLEX
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 75, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9, 3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
while True:
# Capture frame-by-frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if global_count % 30 != 0:
# Global count to skip frames
global_count += 1
continue
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points (after refining them)
if ret:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
frame = cv2.drawChessboardCorners(frame, (9, 6), corners2, ret)
cv2.imshow('img', frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
cv2.destroyAllWindows()
calculate(objpoints, imgpoints, gray)
``` |
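Once `calculate()` has produced the camera matrix `mtx` and distortion coefficients `dist`, the usual next step is to undistort frames with them. A small sketch of that step; note the script above only writes the values to `calib_result.txt`, so a real consumer would need to return them from `calculate()` or parse that file first:
```python
import cv2

def undistort_image(image_path, mtx, dist):
    """Undistort a single image using a calibration result (mtx, dist)."""
    frame = cv2.imread(image_path)
    h, w = frame.shape[:2]
    # Refine the camera matrix for this image size and crop to the valid region.
    new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    undistorted = cv2.undistort(frame, mtx, dist, None, new_mtx)
    x, y, rw, rh = roi
    return undistorted[y:y + rh, x:x + rw]
```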
{
"source": "jmargutt/automated-building-detection",
"score": 2
} |
#### File: abd_model/loaders/semseg.py
```python
import os
import numpy as np
import torch.utils.data
from abd_model.da.core import to_tensor
from abd_model.tiles import tiles_from_dir, tile_image_from_file, tile_label_from_file, tile_image_buffer, tile_is_neighboured
class SemSeg(torch.utils.data.Dataset):
def __init__(self, config, ts, root, cover=None, tiles_weights=None, mode=None, metatiles=False, keep_borders=False):
super().__init__()
self.mode = mode
self.config = config
self.tiles_weights = tiles_weights
self.metatiles = metatiles
self.da = True if "da" in self.config["train"].keys() and self.config["train"]["da"]["p"] > 0.0 else False
assert mode in ["train", "eval", "predict"]
path = os.path.join(root, config["channels"][0]["name"])
self.tiles_paths = [(tile, path) for tile, path in tiles_from_dir(path, cover=cover, xyz_path=True)]
if metatiles:
self.metatiles_paths = self.tiles_paths
if not keep_borders:
self.tiles_paths = [
(tile, path) for tile, path in self.metatiles_paths if tile_is_neighboured(tile, self.metatiles_paths)
]
self.cover = {tile for tile, path in self.tiles_paths}
assert len(self.tiles_paths), "Empty Dataset"
self.tiles = {}
num_channels = 0
for channel in config["channels"]:
path = os.path.join(root, channel["name"])
self.tiles[channel["name"]] = [
(tile, path) for tile, path in tiles_from_dir(path, cover=self.cover, xyz_path=True)
]
num_channels += len(channel["bands"])
self.shape_in = (num_channels,) + tuple(ts) # C,W,H
self.shape_out = (len(config["classes"]),) + tuple(ts) # C,W,H
if self.mode in ["train", "eval"]:
path = os.path.join(root, "labels")
self.tiles["labels"] = [(tile, path) for tile, path in tiles_from_dir(path, cover=self.cover, xyz_path=True)]
for channel in config["channels"]: # Order images and labels accordingly
self.tiles[channel["name"]].sort(key=lambda tile: tile[0])
self.tiles["labels"].sort(key=lambda tile: tile[0])
assert len(self.tiles), "Empty Dataset"
def __len__(self):
return len(self.tiles_paths)
def __getitem__(self, i):
tile = None
mask = None
image = None
for channel in self.config["channels"]:
image_channel = None
tile, path = self.tiles[channel["name"]][i]
bands = None if not channel["bands"] else channel["bands"]
if self.metatiles:
image_channel = tile_image_buffer(tile, self.metatiles_paths, bands)
else:
image_channel = tile_image_from_file(path, bands)
assert image_channel is not None, "Dataset channel {} not retrieved: {}".format(channel["name"], path)
image = np.concatenate((image, image_channel), axis=2) if image is not None else image_channel
if self.mode in ["train", "eval"]:
assert tile == self.tiles["labels"][i][0], "Dataset mask inconsistency"
mask = tile_label_from_file(self.tiles["labels"][i][1])
assert mask is not None, "Dataset mask not retrieved"
weight = self.tiles_weights[tile] if self.tiles_weights is not None and tile in self.tiles_weights else 1.0
image, mask = to_tensor(self.config, self.shape_in[1:3], image, mask=mask, da=self.da)
return image, mask, tile, weight
if self.mode in ["predict"]:
image = to_tensor(self.config, self.shape_in[1:3], image, resize=False, da=False)
return image, torch.IntTensor([tile.x, tile.y, tile.z])
``` |
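Since `SemSeg` subclasses `torch.utils.data.Dataset`, it can be fed directly to a `DataLoader`. A rough sketch; the `config` dict below only names the keys the class reads (`channels`, `classes`, `train.da.p`), and its concrete values and the directory layout are placeholders rather than the project's real configuration format:
```python
import torch
from abd_model.loaders.semseg import SemSeg

# Placeholder configuration; real values come from the abd_model config file.
config = {
    "channels": [{"name": "images", "bands": [1, 2, 3]}],
    "classes": ["Building"],
    "train": {"da": {"p": 0.0}},
}

dataset = SemSeg(config, ts=(512, 512), root="dataset/training", mode="train")
loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

images, masks, tiles, weights = next(iter(loader))
print(images.shape, masks.shape)
```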
{
"source": "jmarine/ezeeai",
"score": 3
} |
#### File: ezeeai/config/config_writer.py
```python
from ezeeai.config.config_reader import CustomConfigParser
class ConfigWriter:
def __init__(self):
self.config = CustomConfigParser()
def itemize(self, form):
result = []
for k, value in form.items():
# print(k, value)
if 'token' not in k: # csrf_token
try:
section, key = k.split('-', 1)
result.append((section.upper(), key, value))
except ValueError:
continue
return result
def populate_config(self, form):
for section, key, value in self.itemize(form):
self.add_item(section, key, value)
def add_item(self, section, key, value):
if section not in self.config.sections():
self.config.add_section(section)
self.config.set(section, key, value)
def write_config(self, path):
with open(path, 'w') as f:
self.config.write(f)
def append_config(self, path):
with open(path, 'a') as f:
self.config.write(f)
```
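`itemize()` expects form field names shaped as `section-key`, so a flat dict of form values maps straight onto INI sections. A small sketch using only the class above; the field names and the output path are illustrative:
```python
from ezeeai.config.config_writer import ConfigWriter

# Illustrative form data; keys follow the "section-key" pattern itemize() parses.
form = {
    "csrf_token": "ignored",            # skipped because the key contains 'token'
    "network-hidden_layers": "128,64",
    "training-batch_size": "32",
    "training-num_epochs": "10",
}

writer = ConfigWriter()
writer.populate_config(form)
writer.write_config("model_config.ini")
# Produces [NETWORK] and [TRAINING] sections holding the corresponding keys.
```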
#### File: core/extensions/best_exporter.py
```python
from __future__ import absolute_import
import abc
import os
import json
import glob
import shutil
from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter
def _verify_compare_fn_args(compare_fn):
"""Verifies compare_fn arguments."""
args = set(util.fn_args(compare_fn))
if 'best_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
if 'current_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include current_eval_result argument.' %
compare_fn)
non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
if non_valid_args:
raise ValueError('compare_fn (%s) has following not expected args: %s' %
(compare_fn, non_valid_args))
def _loss_smaller(best_eval_result, current_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is smaller.
Both evaluation results should have the values for MetricKeys.LOSS, which are
used for comparison.
Args:
best_eval_result: best eval metrics.
current_eval_result: current eval metrics.
Returns:
True if the loss of current_eval_result is smaller; otherwise, False.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_keys.MetricKeys.LOSS
if not best_eval_result or default_key not in best_eval_result:
raise ValueError(
'best_eval_result cannot be empty or no loss is found in it.')
if not current_eval_result or default_key not in current_eval_result:
raise ValueError(
'current_eval_result cannot be empty or no loss is found in it.')
return best_eval_result[default_key] > current_eval_result[default_key]
class BestExporter(Exporter):
"""This class exports the serving graph and checkpoints of the best models.
This class performs a model export every time the new model is better
than any existing model.
"""
def __init__(self,
name='best_exporter',
serving_input_receiver_fn=None,
event_file_pattern='eval/*.tfevents.*',
compare_fn=_loss_smaller,
assets_extra=None,
as_text=False,
exports_to_keep=5):
"""Create an `Exporter` to use with `tf.estimator.EvalSpec`.
Example of creating a BestExporter for training and evaluation:
```python
def make_train_and_eval_fn():
# Set up feature columns.
categorial_feature_a = (
tf.feature_column.categorical_column_with_hash_bucket(...))
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = tf.estimator.DNNClassifier(
config=tf.estimator.RunConfig(
model_dir='/my_model', save_summary_steps=100),
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
serving_feature_spec = tf.feature_column.make_parse_example_spec(
categorial_feature_a_emb)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name="best_exporter",
serving_input_receiver_fn=serving_input_receiver_fn,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(...)
eval_spec = [tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=100,
exporters=exporter,
start_delay_secs=0,
throttle_secs=5)]
return tf.estimator.DistributedTrainingSpec(estimator, train_spec,
eval_spec)
```
Args:
name: unique name of this `Exporter` that is going to be used in the
export path.
serving_input_receiver_fn: a function that takes no arguments and returns
a `ServingInputReceiver`.
event_file_pattern: event file name pattern relative to model_dir. If
None, however, the exporter would not be preemption-safe. To be
preemption-safe, event_file_pattern should be specified.
compare_fn: a function that compares two evaluation results and returns
true if current evaluation result is better. Follows the signature:
* Args:
* `best_eval_result`: This is the evaluation result of the best model.
* `current_eval_result`: This is the evaluation result of current
candidate model.
* Returns:
True if current evaluation result is better; otherwise, False.
assets_extra: An optional dict specifying how to populate the assets.extra
directory within the exported SavedModel. Each key should give the
destination path (including the filename) relative to the assets.extra
directory. The corresponding value gives the full path of the source
file to be copied. For example, the simple case of copying a single
file without renaming it is specified as `{'my_asset_file.txt':
'/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format. Defaults to
`False`.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to `None` to disable garbage
collection.
Raises:
ValueError: if any arguments is invalid.
"""
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
_verify_compare_fn_args(self._compare_fn)
self._saved_model_exporter = _SavedModelExporter(
name, serving_input_receiver_fn, assets_extra, as_text)
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
self._exports_to_keep = exports_to_keep
self._log = {}
if exports_to_keep is not None and exports_to_keep <= 0:
raise ValueError(
'`exports_to_keep`, if provided, must be positive number')
@property
def name(self):
return self._saved_model_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
export_result = None
if self._model_dir != estimator.model_dir and self._event_file_pattern:
# Loads best metric from event files.
tf_logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if os.path.isfile(os.path.join(export_path, 'export.log')):
self._log = {}
try:
self._log = json.load(open(os.path.join(export_path, 'export.log'), 'r'))
except json.JSONDecodeError:
pass
if len(self._log) == 0:
self._best_eval_result = None
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf_logging.info('Performing best model export.')
self._best_eval_result = eval_result
export_result = self._saved_model_exporter.export(
estimator, export_path, checkpoint_path, eval_result,
is_the_final_export)
export_result_path = export_result.decode("utf-8")
self._log[export_result_path] = {k: float(v) for k, v in eval_result.items()}
self._copy_checkpoint(checkpoint_path, export_result_path, eval_result["global_step"])
self._garbage_collect_exports(export_path)
with open(os.path.join(export_path, 'export.log'), 'w') as fp:
json.dump(self._log, fp)
return export_result
def _copy_checkpoint(self, checkpoint_pattern, dest_path, step):
for file in glob.glob(checkpoint_pattern + '*'):
shutil.copy(file, dest_path)
with open(os.path.join(dest_path, 'checkpoint'), 'w') as fp:
text = 'model_checkpoint_path: "model.ckpt-number"\n'.replace('number', str(step))
fp.write(text)
fp.close()
def _garbage_collect_exports(self, export_dir_base):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
"""
if self._exports_to_keep is None:
return
def _export_version_parser(path):
# create a simple parser that pulls the export_version from the directory.
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
# pylint: disable=protected-access
keep_filter = gc._largest_export_versions(self._exports_to_keep)
delete_filter = gc._negation(keep_filter)
for p in delete_filter(
gc._get_paths(export_dir_base, parser=_export_version_parser)):
try:
del self._log[p.path]
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
# pylint: enable=protected-access
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
event_count = 0
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
event_count += 1
best_eval_result = event_eval_result
if event_count < 2:
return None
return best_eval_result
```
#### File: core/model/model_builder.py
```python
import tensorflow as tf
from .custom_estimators import regressor, classifier, binary_classifier
class ModelBuilder:
def get_model(self, params):
if 'label_dimension' in params or params['n_classes'] == 0:
return regressor
if params['n_classes'] > 2:
return classifier
return binary_classifier
def create_from_canned(self, params):
params['mode'] = 'canned_dnn' if 'hidden_units' in params else 'canned_linear'
params['loss_function'] = params['loss_function_canned']
model_fn = self.get_model(params)
return tf.estimator.Estimator(model_fn=model_fn, params=params, config=params['config'],
model_dir=params['checkpoint_dir'])
def create_from_keras(self, params):
params['mode'] = 'custom'
model_fn = self.get_model(params)
return tf.estimator.Estimator(model_fn=model_fn, params=params, config=params['config'],
model_dir=params['checkpoint_dir'])
```
#### File: ezeeai/data/tabular.py
```python
from functools import reduce
from sklearn.model_selection import train_test_split
from tensorflow.python.feature_column.feature_column_v2 import IndicatorColumn
from .utils.tabular import make_csv_dataset
from .feature_selection import FeatureSelection
from ..utils import args
import pandas as pd
import itertools
import os
import tensorflow as tf
import numpy as np
from ..utils.feature_util import get_feature_key, get_feature_names
class Tabular:
SAMPLE_DATA_SIZE = 5
MAX_CATEGORICAL_SIZE = 2000
MAX_RANGE_SIZE = 257
MIN_RANGE_SIZE = 1
def __init__(self, name, file):
self._name = None
self._file = None
self._train_file = None
self._validation_file = None
self._test_file = None
self._df = None
self._normalize = False
self._fs = None
self._defaults = None
self._converted_defaults = None
self._keyed_defaults = None
self._categories = None # session -> category_list
self._column_categories = None
self._summary = None # session -> data
self._targets = None
self._all_feature_columns = None # session -> all_features
self._feature_columns = None # session -> features
self._feature_names = None
self._train_size = None
self._split = None
self.set_file(file)
self.set_name(name)
self.load_features()
self._base_path = self._file.replace(self._name + '.csv', '')
def set_max_categorical_size(self, value):
self.MAX_CATEGORICAL_SIZE = value
def set_max_range_size(self, value):
self.MAX_RANGE_SIZE = value
def set_min_range_size(self, value):
self.MIN_RANGE_SIZE = value
def set_sample_data_size(self, value):
self.SAMPLE_DATA_SIZE = value
def set_name(self, name):
args.assert_type(str, name)
self._name = name
def get_name(self):
return self._name
def set_file(self, file):
args.assert_file(file)
self._file = file
def get_file(self):
return self._file
def set_base_path(self, path):
args.assert_folder(path)
self._base_path = path
def get_base_path(self):
return self._base_path
def set_train_file(self, file):
args.assert_file(file)
self._train_file = file
def get_train_file(self):
return self._train_file
def set_validation_file(self, file):
args.assert_file(file)
self._validation_file = file
def get_validation_file(self):
return self._validation_file
def set_test_file(self, file):
# args.assert_file(file)
if file:
args.assert_type((str, list), file)
self._test_file = file
def get_test_file(self):
return self._test_file
def get_df(self):
return self._df
def set_df(self, df):
args.assert_type(pd.DataFrame, df)
self._df = df
def get_normalize(self):
return self._normalize
def set_normalize(self, norm):
args.assert_type(bool, norm)
self._normalize = norm
def get_feature_selection(self):
return self._fs
def set_feature_selection(self, fs):
args.assert_type(FeatureSelection, fs)
self._fs = fs
def get_defaults(self):
return self._defaults
def get_converted_defaults(self):
return self._converted_defaults
def get_keyed_defaults(self):
return self._keyed_defaults
def set_defaults(self, defaults):
args.assert_type(dict, defaults)
self._defaults = defaults
self.update_converted_defaults()
def get_categories(self):
return self._categories
def set_categories(self, categories):
args.assert_type(list, categories)
self._categories = categories
def get_column_categories(self):
return self._column_categories
def set_column_categories(self, categories):
args.assert_type(dict, categories)
self._column_categories = categories
def get_data_summary(self):
return self._summary
def set_data_sumary(self, ds):
args.assert_type(pd.DataFrame, ds)
self._summary = ds
def get_targets(self):
return self._targets
def set_targets(self, targets):
args.assert_type(list, targets)
self._targets = targets
def get_feature_columns(self):
return self._feature_columns
def get_feature_names(self):
return self._feature_names
def set_feature_columns(self, fc):
args.assert_type(list, fc)
self._feature_columns = fc
self._feature_names = get_feature_names(self.get_feature_columns())
def get_all_feature_columns(self):
return self._all_feature_columns
def set_all_feature_columns(self, fc):
args.assert_type(list, fc)
self._all_feature_columns = fc
def set_train_size(self):
if self._train_file is not None:
self._train_size = len(pd.read_csv(self._train_file))
def get_train_size(self):
if self._train_file is not None:
if self._train_size is None:
self.set_train_size()
return self._train_size
return None
def get_split(self):
return self._split
def set_split(self, split):
args.assert_type(str, split)
self._split = split
def _assign_category(self):
fs = FeatureSelection(self.get_df(), self.MAX_CATEGORICAL_SIZE, self.MAX_RANGE_SIZE, self.MIN_RANGE_SIZE)
self.set_feature_selection(fs)
category_list, unique_values, default_list, frequent_values2frequency = fs.assign_category(self.get_df())
return category_list, unique_values, default_list, frequent_values2frequency
def _insert_data_summary(self, unique_values, default_list, frequent_values2frequency, sample_data_size):
df = self.get_df()
categories = self.get_categories()
df = df.apply(lambda x: x.fillna(x.mode()[0]) if not x.mode().empty else x, axis=0)  # fill missing values with each column's most frequent value
data = df.head(sample_data_size).T
data.insert(0, 'Defaults', default_list.values())
data.insert(0, '(most frequent, frequency)', frequent_values2frequency.values())
data.insert(0, 'Unique Values', unique_values)
data.insert(0, 'Category', categories)
sample_column_names = ["Sample {}".format(i) for i in range(1, sample_data_size + 1)]
data.columns = list(
itertools.chain(['Category', '#Unique Values', '(Most frequent, Frequency)', 'Defaults'],
sample_column_names))
return data
def get_new_features(self, form):
fs_list = self.get_feature_selection().group_by(self.get_categories())['none']
new_features = {}
for k, v in self.get_defaults().items():
if k not in fs_list:
new_features[k] = form[k] if k not in self.get_targets() else self.get_defaults()[k]
return new_features
def load_features(self):
self.set_df(pd.read_csv(self.get_file()))
df = self.get_df()
df.reset_index(inplace=True, drop=True)
categories, unique_values, default_list, frequent_values2frequency = self._assign_category()
self.set_categories(categories)
self.set_data_sumary(self._insert_data_summary(unique_values, default_list, frequent_values2frequency,
self.SAMPLE_DATA_SIZE))
default_values = [str(v) for v in default_list.values()]
self.set_defaults(dict(zip(self.get_data_summary().index.tolist(), default_values)))
def update_targets(self, targets):
fs = self.get_feature_selection()
categories = self.get_categories()
summary_data = self.get_data_summary()
df = self.get_df()
for t in targets:
if summary_data.Category[t] == 'hash':
return 'Hash features are not allowed as target.'
if len(targets) > 1:
for t in targets:
if summary_data.Category[t] != 'numerical':
return 'Only numerical features are supported for multioutput.'
self.set_targets(targets)
if len(targets) == 1:
target_type = summary_data.Category[targets[0]]
if target_type == 'range':
new_categ_list = []
for categ, feature in zip(categories, df.columns):
new_categ_list.append(categ if feature != targets[0] else 'categorical')
self.set_categories(new_categ_list)
summary_data.Category = new_categ_list
fs.update(categories, dict(zip(summary_data.index.tolist(), summary_data.Defaults)))
return ''
def update_feature_columns(self):
categories = self.get_categories()
fs = self.get_feature_selection()
training_path = self.get_train_file()
targets = self.get_targets()
self.set_feature_columns(fs.create_tf_features(categories, targets, self.get_normalize(), training_path))
self.set_all_feature_columns(fs.create_tf_features(categories, targets, self.get_normalize(), training_path,
without_label=False))
def update_features(self, cat_columns, default_values):
old_categories = self.get_categories()
self.set_categories(cat_columns)
for i in range(len(cat_columns)):
if 'none' in cat_columns[i]:
cat_columns[i] = 'none'
summary_data = self.get_data_summary()
summary_data.Category = cat_columns
default_values = [str(v) for v in default_values]
summary_data.Defaults = default_values
self.set_defaults(dict(zip(summary_data.index.tolist(), default_values)))
self.get_feature_selection().update(cat_columns, dict(zip(summary_data.index.tolist(), default_values)))
column_categories = {}
for label, old_cat in zip(summary_data.index, old_categories):
new_cat = summary_data.Category[label] # if data.Category[label] != 'range' else 'int-range'
cat = new_cat + '-' + old_cat.replace('none-', '') if 'none' in new_cat else new_cat
column_categories[label] = cat
self.set_column_categories(column_categories)
def split_dataset(self, percent=None):
percent = percent or self.get_split()
self.set_split(percent)
file = self.get_file()
basename = os.path.basename(file)
train_file = os.path.join(file.rstrip(basename), 'train', basename)
validation_file = os.path.join(file.rstrip(basename), 'valid', basename)
percent = percent.split(',')
percent = (int(percent[0]), int(percent[1]), int(percent[2]))
targets = self.get_targets()
df = self.get_df()
stratify = None
val_frac = percent[1] / 100
if len(targets) == 1 and self.get_df()[targets[0]].dtype == 'object':
counts = df[targets[0]].value_counts()
df = df[df[targets[0]].isin(counts[counts > 1].index)]
stratify = df[[targets[0]]]
train_df, val_df = train_test_split(df, test_size=val_frac, stratify=stratify, random_state=42)
if percent[2] != 0:
pre = os.path.join(file.rstrip(basename), 'test', basename).split('.')
test_file = f'{pre[0]}_split_test.{pre[1]}'
test_size = int(round((percent[2] / 100) * len(df)))
if len(targets) == 1 and self.get_df()[targets[0]].dtype == 'object':
counts = train_df[targets[0]].value_counts()
train_df = train_df[train_df[targets[0]].isin(counts[counts > 1].index)]
stratify = train_df[[targets[0]]]
train_df, test_df = train_test_split(train_df, test_size=test_size, stratify=stratify, random_state=42)
test_df.to_csv(test_file, index=False)
self.set_test_file(test_file)
train_df.to_csv(train_file, index=False)
val_df.to_csv(validation_file, index=False)
self.set_train_file(train_file)
self.set_validation_file(validation_file)
def get_params(self):
return {'name': self.get_name(), 'split': self.get_split(), 'targets': self.get_targets(),
'category_list': self.get_column_categories(), 'normalize': self.get_normalize()}
def get_num_outputs(self):
targets = self.get_targets()
df = self.get_df()
if len(targets) > 1:
return len(targets)
if df[targets[0]].dtype == "object":
if len(df[targets[0]].unique()) <= 2:
return 1
return len(df[targets[0]].unique())
return 1
def get_num_inputs(self):
fs_list = self.get_feature_selection().group_by(self.get_categories())['none']
filtered = [f for f in self.get_feature_columns() if get_feature_key(f) not in fs_list]
# filter feature_columns
shapes = [x._variable_shape.num_elements() for x in filtered]
return reduce(lambda x, y: x + y, shapes)
def get_target_labels(self):
if len(self.get_targets()) > 1:
return None
target = self.get_targets()[0]
target_type = self.get_data_summary().Category[target]
fs = self.get_feature_selection()
if target_type == 'categorical' or target_type == 'hash':
return fs.cat_unique_values_dict[target]
elif 'range' in target_type:
return [str(a) for a in list(range(min(fs.df[target].values), max(fs.df[target].values)))]
return None
def get_dtypes(self):
return self.get_feature_selection().group_by(self.get_categories())
def get_mode(self):
if self.get_data_summary().Category[self.get_targets()[0]] == 'numerical':
return 'regression'
return 'classification'
def update_converted_defaults(self):
dtypes = self.get_dtypes()
defaults = self.get_defaults().copy()
defaults.update({key: float(defaults[key]) for key in dtypes['numerical']})
if 'range' in dtypes:
defaults.update({key: int(float(defaults[key])) for key in dtypes['range']})
self._converted_defaults = [[key] for key in defaults.values()]
self._keyed_defaults = defaults
def to_array(self, features):
df = self.get_df()[self.get_feature_names()].copy()
feature_types = self.get_dtypes()
for c in df.columns:
if c in features.keys():
if df[c].dtype == 'object':
if feature_types[c] == 'hash':
try:
features[c] = int(float(features[c]))
except:
pass
df.loc[:, c] = df.loc[:, c].astype('category')
mapp = {y: x for x, y in dict(enumerate(df[c].cat.categories)).items()}
features[c] = float(mapp[features[c]])
else:
features[c] = float(features[c])
feats = df[self.get_feature_names()].append(
pd.DataFrame(pd.Series(features)).transpose()[self.get_feature_names()]).tail(1)
input_predict = feats.values.reshape(-1)
return input_predict
def from_array(self, features):
df = self.get_df()[self.get_feature_names()].copy()
feature_types = self.get_dtypes()
for c in df.columns:
if c in features.keys():
if df[c].dtype == 'object':
df.loc[:, c] = df.loc[:, c].astype('category')
mapp = {x: y for x, y in dict(enumerate(df[c].cat.categories)).items()}
features[c] = np.vectorize(mapp.get)(features[c])
if feature_types[c] == 'hash':
features[c] = features[c].astype(str)
else:
features[c] = features[c].astype(df[c].dtype)
return features
def create_feat_array(self, features):
for t in self.get_targets():
del features[t]
features = {k: features[k] for k in self.get_feature_names()}
return self.to_array(features)
def clean_values(self, df):
df.replace([np.inf, -np.inf], np.nan, inplace=True)
for c in df.columns.values:
df.loc[:, c] = df.loc[:, c].fillna(self._keyed_defaults[c]).astype(type(self._keyed_defaults[c]))
return df
def make_numpy_array(self, file, sel_target=None):
df = self.clean_values(pd.read_csv(file))
target = sel_target or self.get_targets()[0]
y = df[target].values
df.drop(self.get_targets(), axis=1, inplace=True)
df = df[self.get_feature_names()]
for c in df.columns:
if df[c].dtype == 'object':
df.loc[:, c] = df.loc[:, c].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df.loc[:, cat_columns] = df.loc[:, cat_columns].apply(lambda x: x.cat.codes)
return df.values, y
def get_categorical_features(self):
categorical_features = []
df = self.get_df()[self.get_feature_names()]
for c in df.columns:
if df[c].dtype == "object":
categorical_features.append(c)
else:
for x in self.get_feature_columns():
if type(x) == IndicatorColumn and x[0].key == c:
categorical_features.append(c)
categorical_index = [list(df.columns.values).index(x) for x in categorical_features]
categorical_names = {k: df[k].unique() for k in categorical_features}
return categorical_features, categorical_index, categorical_names
def train_input_fn(self, batch_size, num_epochs):
csv_dataset = make_csv_dataset([self.get_train_file()], batch_size=batch_size, shuffle=True,
label_names=self.get_targets(), num_epochs=num_epochs,
column_defaults=self.get_converted_defaults())
return csv_dataset
def validation_input_fn(self, batch_size):
csv_dataset = make_csv_dataset([self.get_validation_file()], batch_size=batch_size, shuffle=False,
label_names=self.get_targets(), num_epochs=1,
column_defaults=self.get_converted_defaults())
return csv_dataset
def test_input_fn(self, batch_size, file=None):
# file = file or self.get_test_file()[0] if isinstance(self.get_test_file(),
# list) else self.get_test_file() # TODO
csv_dataset = make_csv_dataset([file], batch_size=batch_size, shuffle=False,
label_names=self.get_targets(), num_epochs=1,
column_defaults=self.get_converted_defaults())
return csv_dataset
def input_predict_fn(self, features):
df = self.get_df()
for t in self.get_targets():
del features[t]
features = {k: features[k] for k in get_feature_names(self.get_feature_columns())}
for k, v in features.items():
features[k] = np.array([v]).astype(df[k].dtype) if df[k].dtype == 'object' else np.array(
[float(v)]).astype(df[k].dtype)
return tf.estimator.inputs.numpy_input_fn(x=features, y=None, num_epochs=1, shuffle=False)
def serving_input_receiver_fn(self):
feature_spec = tf.feature_column.make_parse_example_spec(self.get_feature_columns())
receiver_tensors = {k: tf.placeholder(v.dtype, [None, 1]) for k, v in feature_spec.items()}
return tf.estimator.export.ServingInputReceiver(receiver_tensors=receiver_tensors,
features=receiver_tensors)
def get_all_test_files(self):
return [f for f in os.listdir(os.path.join(self.get_base_path(), 'test')) if not f.startswith('.')]
```
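The `split_dataset` method above drives the train/validation/test split from a comma-separated percentage string. Below is a minimal standalone sketch of that parsing (the numbers are invented; `train_test_split` then receives `val_frac` and `test_size` as in the method):
```python
# Illustrative restatement of how split_dataset interprets its percent string.
percent = '70,20,10'                                   # train, validation, test percentages
train_pct, val_pct, test_pct = (int(p) for p in percent.split(','))

n_rows = 1000                                          # hypothetical number of rows in the CSV
val_frac = val_pct / 100                               # fraction passed to the first train_test_split
test_size = int(round((test_pct / 100) * n_rows))      # absolute row count for the optional test split

print(val_frac, test_size)                             # 0.2 200
```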
#### File: ezeeai/utils/hooks.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export
import smtplib
from email.mime.text import MIMEText
@tf_export("train.EmailAtStepHook")
class EmailAtStepHook(session_run_hook.SessionRunHook):
def __init__(self, user_info, server_info, every_n_iter=None, every_n_secs=None,
at_end=False):
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
self._timer = (
NeverTriggerTimer() if only_log_at_end else
SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
self._user_info = user_info
self._server_info = server_info
self._timer.reset()
self._iter_count = 0
def begin(self):
pass
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._send_email()
self._iter_count += 1
def end(self, session):
if self._log_at_end:
self._send_email()
def _send_email(self):
smtpserver = 'smtp.gmail.com:587'
        header = 'From: %s\n' % self._server_info['email_address']
        header += 'To: %s\n' % self._user_info['email_address']
        header += 'Subject: %s\n\n' % "Training finished"
        message = header + "Training finished"
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(self._server_info['login'], self._server_info['password'])
problems = server.sendmail(self._server_info['email_address'], self._user_info['email_address'], message)
server.quit()
```
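A hedged usage sketch of `EmailAtStepHook`: the hook is built from two small dicts and handed to an estimator's `hooks` list. The import path is inferred from the file location, and all addresses and credentials below are placeholders.
```python
from ezeeai.utils.hooks import EmailAtStepHook  # import path assumed from the file layout

server_info = {'email_address': 'bot@example.com',      # placeholder sender account
               'login': 'bot@example.com',
               'password': 'app-password'}
user_info = {'email_address': 'user@example.com'}       # placeholder recipient

# Send a mail every 10000 training steps and once more when training ends.
email_hook = EmailAtStepHook(user_info, server_info, every_n_iter=10000, at_end=True)

# The hook would then be passed to an estimator, e.g.:
# estimator.train(input_fn=train_input_fn, hooks=[email_hook])
```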
#### File: ezeeai/utils/metrics.py
```python
import numpy as np
import os
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, accuracy_score, r2_score
from scipy import interp
from sklearn.preprocessing import label_binarize
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def store_predictions(has_targets, sess, final_pred, output):
if has_targets:
sess.set('y_true', output)
sess.set('y_pred', np.array(final_pred['preds']))
if 'logits' in final_pred:
sess.set('logits', np.array(final_pred['logits']))
def roc_auc(y_test, y_score, classes):
fpr = {}
tpr = {}
roc_auc = {}
if len(classes) == 2:
if np.max(y_score) > 1:
y_score = sigmoid(y_score)
fpr['bin'], tpr['bin'], _ = roc_curve(y_test.reshape(-1), y_score.reshape(-1),
pos_label=np.array(classes).astype(y_test.dtype)[1])
roc_auc['bin'] = auc(fpr['bin'], tpr['bin'])
fpr['bin'] = fpr['bin'].tolist()
tpr['bin'] = tpr['bin'].tolist()
dict_results = {'roc_auc': roc_auc, 'fpr': fpr, 'tpr': tpr}
else:
if np.max(y_score) > 1:
y_score = softmax(y_score, axis=1)
y_test = label_binarize(y_test, classes=np.array(classes).astype(y_test.dtype))
n_classes = y_test.shape[1]
# y_score = y_score.reshape([-1, n_classes])
for i in range(n_classes):
fpr[classes[i]], tpr[classes[i]], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[classes[i]] = auc(fpr[classes[i]], tpr[classes[i]])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
all_fpr = np.unique(np.concatenate([fpr[classes[i]] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[classes[i]], tpr[classes[i]])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
dict_results = {'roc_auc': roc_auc, 'fpr': fpr, 'tpr': tpr}
dict_results = to_list(dict_results)
return dict_results
def precision_recall(y_test, y_score, classes):
precision = {}
recall = {}
average_precision = {}
if len(classes) == 2:
if np.max(y_score) > 1:
y_score = sigmoid(y_score)
pos_label = np.array(classes).astype(y_test.dtype)[1]
precision['bin'], recall['bin'], _ = precision_recall_curve(y_test.reshape(-1),
y_score.reshape(-1),
pos_label=pos_label)
average_precision['bin'] = average_precision_score(y_test.reshape(-1),
y_score.reshape(-1), pos_label=pos_label)
precision['bin'] = precision['bin'].tolist()
recall['bin'] = recall['bin'].tolist()
dict_results = {'precision': precision, 'recall': recall, 'average_precision': average_precision}
else:
if np.max(y_score) > 1:
y_score = softmax(y_score, axis=1)
y_test = label_binarize(y_test, classes=np.array(classes).astype(y_test.dtype))
n_classes = y_test.shape[1]
# y_score = y_score.reshape([-1, n_classes])
precision = {}
recall = {}
average_precision = {}
for i in range(n_classes):
precision[classes[i]], recall[classes[i]], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[classes[i]] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
dict_results = {'precision': precision, 'recall': recall, 'average_precision': average_precision}
dict_results = to_list(dict_results)
return dict_results
def to_list(n_dict):
out = {}
for k, v in n_dict.items():
out[k] = {}
for k2, v2 in v.items():
out[k][k2] = v2.tolist()
return out
def softmax(X, theta=1.0, axis=None):
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis=axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
def sigmoid(x, derivative=False):
return x * (1 - x) if derivative else 1 / (1 + np.exp(-x))
def get_mode_metrics(has_targets, mode, labels, local_sess, targets):
if not has_targets:
return {}
return get_metrics('classification', local_sess.get_y_true(), local_sess.get_y_pred(), labels,
logits=local_sess.get_logits()) if mode == 'classification' \
else get_metrics('regression', local_sess.get_y_true(), local_sess.get_y_pred(), labels,
target_len=len(targets))
def get_metrics(mode, y_true, y_pred, labels, target_len=1, logits=None):
metrics = {}
if mode == 'classification':
roc = roc_auc(y_true, logits, labels)
pr = precision_recall(y_true, logits, labels)
metrics['roc'] = roc
metrics['pr'] = pr
metrics['accuracy'] = accuracy_score(y_true.reshape(-1), y_pred.reshape(-1).astype(y_true.dtype))
else:
if target_len > 1:
y_pred = y_pred.reshape(-1, target_len)
y_true = y_true.reshape(-1, target_len)
metrics['y_true'] = y_true.tolist()
metrics['y_pred'] = y_pred.tolist()
y_valid = ~np.isnan(y_pred).any(axis=1)
y_pred = y_pred[y_valid]
y_true = y_true[y_valid]
y_valid = ~np.isnan(y_true).any(axis=1)
y_pred = y_pred[y_valid]
y_true = y_true[y_valid]
metrics['r2_score'] = r2_score(y_true, y_pred, multioutput='raw_values').tolist()
else:
y_pred = y_pred.reshape(-1)
y_true = y_true.reshape(-1)
metrics['y_true'] = y_true.tolist()
metrics['y_pred'] = y_pred.tolist()
y_valid = ~np.isnan(y_pred)
y_pred = y_pred[y_valid]
y_true = y_true[y_valid]
y_valid = ~np.isnan(y_true)
y_pred = y_pred[y_valid]
y_true = y_true[y_valid]
metrics['r2_score'] = r2_score(y_true, y_pred)
return metrics
def train_eval_graphs(path):
train = {}
eval = {}
if not os.path.isdir(path):
return {}
train_events = [os.path.join(path, f) for f in os.listdir(path) if f.startswith('events.out.tfevents')]
if len(train_events) == 0:
return {}
train_events.sort(key=lambda x: os.path.getmtime(x))
train_summary = train_events[0]
summary_iterator = EventAccumulator(train_summary).Reload()
tags = [m for m in summary_iterator.Tags()['scalars'] if
m.split('_1')[0] in ['accuracy', 'r_squared', 'loss']]
if len(tags) == 0:
return {}
train['steps'] = [e.step for e in summary_iterator.Scalars(tags[0])]
for tag in tags:
train[tag.split('_1')[0]] = []
for e in summary_iterator.Scalars(tag):
train[tag.split('_1')[0]].append(e.value)
eval_events = []
if os.path.isdir(os.path.join(path, 'eval')):
eval_events = [os.path.join(path, 'eval', f) for f in os.listdir(os.path.join(path, 'eval')) if
f.startswith('events.out.tfevents')]
if len(eval_events) == 0:
return {'train': train}
eval_events.sort(key=lambda x: os.path.getmtime(x))
eval_summary = eval_events[0]
summary_iterator = EventAccumulator(eval_summary).Reload()
tags = [m for m in summary_iterator.Tags()['scalars'] if
m.split('_1')[0] in ['accuracy', 'r_squared', 'loss']]
if len(tags) == 0:
return {'train': train}
eval['steps'] = [e.step for e in summary_iterator.Scalars(tags[0])]
for tag in tags:
eval[tag.split('_1')[0]] = []
for e in summary_iterator.Scalars(tag):
eval[tag.split('_1')[0]].append(e.value)
return {'train': train, 'eval': eval}
```
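A quick sanity check of the `softmax` and `sigmoid` helpers above (import path inferred from the file location; the score values are made up):
```python
import numpy as np
from ezeeai.utils.metrics import sigmoid, softmax  # import path assumed from the file layout

scores = np.array([[1.0, 2.0, 3.0],
                   [2.0, 1.0, 0.5]])
probs = softmax(scores, axis=1)    # row-wise, as the ROC/PR helpers use it for multi-class logits
print(probs.sum(axis=1))           # each row sums to 1.0
print(sigmoid(np.array([0.0])))    # 0.5 at the decision boundary, as used for binary logits
```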
#### File: ezeeai/utils/preprocessing.py
```python
import pandas as pd
import csv
def clean_field_names(filename):
args = {}
if not has_header(filename):
args['header'] = None
df = pd.read_csv(filename, sep=None, engine='python', **args)
df.columns = df.columns.astype(str)
df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_')
df.columns = df.columns.str.replace('(', '').str.replace(')', '').str.replace('.', '_')
df.columns = df.columns.str.replace('=', '_').str.replace(':', '-')
# if columns duplicated change
cols = pd.Series(df.columns)
for dup in df.columns.get_duplicates():
cols[df.columns.get_loc(dup)] = [dup + '_' + str(d_idx) if d_idx != 0 else dup for d_idx in
range(df.columns.get_loc(dup).sum())]
df.columns = cols
for c in df.columns:
try:
df[c] = df[c].astype(str).str.replace(',', '')
except:
pass
df.to_csv(filename, index=False)
return df
def check_train(train_file, targets):
if len(targets) > 1:
return True
df = pd.read_csv(train_file)
if df[targets[0]].dtype == 'object':
if len(df[targets[0]].unique()) < 2:
return False
return True
def has_header(csvfile, close=True):
if isinstance(csvfile, str):
csvfile = open(csvfile, 'r')
sniffer = csv.Sniffer()
sample_bytes = 50
try:
has_header = sniffer.has_header(csvfile.read(sample_bytes))
except:
has_header = sniffer.has_header(csvfile.read(sample_bytes + 50)) # TODO it does not work!!
if close:
csvfile.close()
else:
csvfile.seek(0)
print(str(csvfile) + ' has header: ' + str(has_header))
return has_header
```
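For reference, here is a literal-string restatement of the renaming rules applied in `clean_field_names` (a sketch of the intent only; the header names below are invented):
```python
def clean_name(name: str) -> str:
    # Same substitutions as the pandas chain above, applied with plain str.replace.
    name = name.strip().lower().replace(' ', '_')
    for old, new in [('(', ''), (')', ''), ('.', '_'), ('=', '_'), (':', '-')]:
        name = name.replace(old, new)
    return name

print([clean_name(c) for c in ['Flow Rate (m3.s)', 'Temp:C', 'A=B']])
# ['flow_rate_m3_s', 'temp-c', 'a_b']
```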
#### File: ezeeai/utils/run_utils.py
```python
import os
import math
import json
import ntpath
import numpy as np
import pandas as pd
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.platform import gfile
from ..config import config_reader
from . import request_util, db_ops
from .metrics import train_eval_graphs
from .param_utils import set_form, set_checkpoint_dir
from .request_util import is_run
from .sys_ops import get_config_path, get_canned_data, get_log_mess, get_log_path
def define_empty_run_params():
model_name = ''
checkpoints = ''
metric = ''
graphs = {}
log_mess = None
return model_name, checkpoints, metric, graphs, log_mess
# def get_run_results(config_file, sess, username, USER_ROOT):
# model_name = config_file.split('/')[-2]
# sess.set_model_name(model_name)
# export_dir = config_reader.read_config(sess.get_config_file()).export_dir()
# checkpoints = get_eval_results(export_dir, sess.get_writer(), sess.get_config_file())
# metric = sess.get_metric()
# graphs = train_eval_graphs(config_reader.read_config(sess.get_config_file()).checkpoint_dir())
# log_mess = get_log_mess(USER_ROOT, username, model_name)
# return checkpoints, metric, graphs, log_mess
def get_html_types(dict_types):
dict_html_types = {}
for k, v in dict_types.items():
dict_html_types[k] = "text" if v in ['categorical', 'range'] else "number"
return dict_html_types
def get_dictionaries(features, categories, fs, targets):
dict_types = {}
categoricals = {}
cont = 0
for k, v in features.items():
if categories[cont] != 'none':
dict_types[k] = categories[cont]
if categories[cont] == 'categorical':
categoricals[k] = fs.cat_unique_values_dict[k]
else:
if categories[cont] == 'range':
categoricals[k] = fs.df[k].unique().tolist()
cont += 1
for target in targets:
if target in categoricals.keys():
categoricals.pop(target)
return dict_types, categoricals
def check_exports(directory):
results = {}
if not os.path.isfile(os.path.join(directory, 'export.log')):
return results
return json.load(open(os.path.join(directory, 'export.log'), 'r'))
def get_eval_results(directory, config_writer, CONFIG_FILE):
results = {}
if not os.path.isfile(os.path.join(directory, 'export.log')):
return results
try:
log_file = json.load(open(os.path.join(directory, 'export.log'), 'r'))
max_perf = 0
max_perf_index = 0
min_loss = math.inf
min_loss_index = 0
for k in list(log_file.keys()):
metric = "accuracy"
v = log_file[k]
if not os.path.isdir(k):
del log_file[k]
continue
step = str(int(v['global_step']))
if 'accuracy' in v.keys():
perf = v['accuracy']
else:
perf = v['r_squared']
metric = 'r_squared'
if max_perf < perf:
max_perf = perf
max_perf_index = step
loss = v['average_loss'] if 'average_loss' in v else v['loss']
if min_loss > loss:
min_loss = loss
min_loss_index = step
try:
perf = float("{0:.3f}".format(perf))
except ValueError:
perf = perf
results[ntpath.basename(k)] = {metric: perf, 'loss': float("{0:.3f}".format(loss)), 'step': step}
if 'TRAINING' in config_writer.config.sections():
config_writer.add_item('BEST_MODEL', 'max_perf', str(float("{0:.3f}".format(max_perf))))
config_writer.add_item('BEST_MODEL', 'max_perf_index', str(max_perf_index))
config_writer.add_item('BEST_MODEL', 'min_loss', str(float("{0:.3f}".format(min_loss))))
config_writer.add_item('BEST_MODEL', 'min_loss_index', str(min_loss_index))
config_writer.write_config(CONFIG_FILE)
except json.JSONDecodeError:
pass
return results
def get_predictions(targets, final_pred):
return dict(zip(targets, final_pred.astype(str))) if len(targets) > 1 else {targets[0]: str(final_pred)}
def create_result_parameters(request, sess, checkpoint=None):
all_params_config = config_reader.read_config(sess.get_config_file())
try:
rb = request_util.get_radiob(request) if checkpoint is None else checkpoint
except:
rb = request.get_json()['checkpoint']
set_checkpoint_dir(all_params_config, rb)
return all_params_config
def get_explain_disabled(cat_list):
if any(k in cat_list for k in ("hash", "int-hash")):
return 'true'
return 'false'
def ckpt_to_table(checkpoints):
perf = next(iter(checkpoints[next(iter(checkpoints))]))
data = [[k, v[perf], v['loss']] for k, v in checkpoints.items()]
columns = ["Model", perf.capitalize(), "Loss"]
return pd.DataFrame(data, columns=columns)
def get_step(train_size, batch_size, path, file_pattern='*.ckpt-*.index'):
full_event_file_pattern = os.path.join(path, file_pattern)
try:
files = gfile.Glob(os.path.join(full_event_file_pattern))
if len(files) == 0:
return 0
files.sort(key=os.path.getmtime)
event_file = files[-1]
steps = int(event_file.split('.ckpt-')[-1].split('.')[0])
epochs = int(np.floor((steps * batch_size) / train_size))
# print(steps, epochs)
return epochs
except (FileNotFoundError, NotFoundError):
return None
def run_post(sess, request, USER_ROOT, username, th):
sess.run_or_pause(is_run(request))
model_name = request.form['model_name']
config_path = get_config_path(USER_ROOT, username, model_name)
sess.set_model_name(model_name)
sess.set_config_file(config_path)
sess.load_config()
sess.get_writer().populate_config(request.form)
sess.get_writer().write_config(sess.get_config_file())
th.run_tensor_board(username, sess.get_config_file())
all_params_config = config_reader.read_config(sess.get_config_file())
get_canned_data(USER_ROOT, username, model_name, all_params_config)
all_params_config.set_email(db_ops.get_email(username))
sess.check_log_fp(all_params_config)
return all_params_config
def load_run_config(sess, th, username, form, USER_ROOT, models):
model_name, checkpoints, metric, graphs, log_mess = define_empty_run_params()
running, config_file = th.check_running(username)
if not running:
config_file = sess.get_config_file() if sess.check_key('config_file') else None
if config_file is not None and os.path.isfile(config_file):
sess.set_config_file(config_file)
sess.load_config()
set_form(form, sess.get_config_file())
model_name = ntpath.basename(config_file.rstrip('config.ini').rstrip('/').rstrip('\\'))
if model_name not in models:
model_name = ''
return running, model_name, checkpoints, metric, graphs, log_mess
sess.set_model_name(model_name)
export_dir = config_reader.read_config(sess.get_config_file()).export_dir()
checkpoints = get_eval_results(export_dir, sess.get_writer(), sess.get_config_file())
metric = sess.get_metric()
graphs = train_eval_graphs(config_reader.read_config(sess.get_config_file()).checkpoint_dir())
log_path = get_log_path(USER_ROOT, username, model_name)
log_mess = open(log_path, 'r').read() if os.path.isfile(log_path) else ''
return running, model_name, checkpoints, metric, graphs, log_mess
```
#### File: jmarine/ezeeai/wsgi.py
```python
import webbrowser
from threading import Timer
from ezeeai.dfweb import app, appConfig
def open_browser():
webbrowser.open_new('http://localhost:5000')
if __name__ == "__main__":
Timer(1, open_browser).start()
app.run(debug=False,
threaded=True,
host=appConfig.host(),
port=appConfig.port())
``` |
{
"source": "jmarini/photoemission-analysis",
"score": 3
} |
#### File: photoemission-analysis/src/limiting_fit.py
```python
from __future__ import print_function, unicode_literals, division
import os
import pandas as pd
import numpy as np
import scipy
import statsmodels.api as sm
import matplotlib.pyplot as plt
import sklearn
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from linear_fit import LinearFit
def cluster_data(data, columns):
""" Perform k-means clustering (N=2) for the specified columns.
Filters on the cluster with the higher mean threshold energy.
"""
X = data[columns].values
sX = scale(X)
estimator = KMeans(n_clusters=2)
estimator.fit(sX)
Z = estimator.predict(sX)
data['cluster'] = Z
cluster = data.groupby(['cluster']).mean().sort_values(by='Eth', ascending=False).index[0]
return data[data.cluster==cluster]
def iterative_fit(data, xcol, ycol, delta=1e-3, max_iterations=10):
""" Perform iterative OLS fit to find the limiting relationship in the data.
"""
condition = None
last = 0.
for n in range(max_iterations):
if condition is not None:
df = data[condition]
else:
df = data
x = df[xcol].values
X = sm.add_constant(x)
y = df[ycol].values
model = sm.OLS(y, X)
results = model.fit()
fit = LinearFit(x, y, results)
if np.abs(last - fit.x_intercept) <= delta:
print('Converged in {} iterations'.format(n))
break
last = fit.x_intercept
condition = data[ycol] > (fit.yp(xp=data[xcol]) - fit.confidence_interval(xp=data[xcol]))
return fit
if __name__ == '__main__':
data = pd.read_csv('data/interim/photoemission.csv')
data['logQE'] = np.log10(data.QE)
data.loc[data.logQE==-np.inf, 'logQE'] = -6
data['sqrtQE'] = np.sqrt(data.QE)
data['QE3'] = np.power(data.QE, 1. / 3.)
data['slope2'] = np.power(data.slope, 2.)
data['slope3'] = np.power(data.slope, 3.)
df = cluster_data(data, ['Eth', 'logQE'])
xerr = np.array([df.err_minus.values, df.err_plus.values])
ycol = 'slope'
fit = iterative_fit(df, 'Eth', ycol)
fig, ax = plt.subplots(1, 1)
ymax = df[ycol].max() * 1.1
df.plot(x='Eth', y=ycol, kind='scatter', xerr=xerr, c='k', ylim=(0, ymax), xlim=(4, 5), ax=ax)
ax.set_xlabel(r'$E_{th}$')
ax.set_ylabel(r'$m^3$')
fit.plot_fit(np.arange(4, 5, 0.01), axis=ax, ci=False)
plt.show()
``` |
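A hypothetical end-to-end call of the two helpers above on synthetic data (module import assumed from the `src/` layout; column names follow the `__main__` block, values are invented):
```python
import numpy as np
import pandas as pd
from limiting_fit import cluster_data, iterative_fit  # assumes src/ is on the path

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'Eth': rng.uniform(4.0, 5.0, 200),     # threshold energies
    'logQE': rng.uniform(-6.0, 0.0, 200),  # log quantum efficiency
    'slope': rng.uniform(0.0, 1.0, 200),   # fitted slopes
})

high = cluster_data(df, ['Eth', 'logQE'])  # keep the cluster with the higher mean Eth
fit = iterative_fit(high, 'Eth', 'slope')  # limiting OLS relationship
print(fit.x_intercept)
```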
{
"source": "jmarinllao/kegg",
"score": 2
} |
#### File: src/bio2bel_kegg/models.py
```python
from __future__ import annotations
from typing import List, Optional
from sqlalchemy import Column, ForeignKey, Integer, String, Table, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
import pybel.dsl
from bio2bel.compath import CompathPathwayMixin, CompathProteinMixin
from bio2bel.manager.models import SpeciesMixin
from .constants import HGNC, KEGG, MODULE_NAME
Base = declarative_base()
SPECIES_TABLE_NAME = f'{MODULE_NAME}_species'
PATHWAY_TABLE_NAME = f'{MODULE_NAME}_pathway'
PATHWAY_TABLE_HIERARCHY = f'{MODULE_NAME}_pathway_hierarchy'
PROTEIN_TABLE_NAME = f'{MODULE_NAME}_protein'
PROTEIN_PATHWAY_TABLE = f'{MODULE_NAME}_protein_pathway'
protein_pathway = Table(
PROTEIN_PATHWAY_TABLE,
Base.metadata,
Column('protein_id', Integer, ForeignKey(f'{PROTEIN_TABLE_NAME}.id'), primary_key=True),
Column('pathway_id', Integer, ForeignKey(f'{PATHWAY_TABLE_NAME}.id'), primary_key=True),
)
class Species(Base, SpeciesMixin):
"""Species table."""
__tablename__ = SPECIES_TABLE_NAME
class Pathway(Base, CompathPathwayMixin):
"""Pathway Table."""
__tablename__ = PATHWAY_TABLE_NAME
id = Column(Integer, primary_key=True) # noqa:A003
bel_encoding = 'B'
prefix = KEGG
identifier = Column(String(255), unique=True, nullable=False, index=True, doc='KEGG id of the pathway')
name = Column(String(255), nullable=False, doc='pathway name')
definition = Column(Text, nullable=True, doc='pathway description')
species = relationship(Species, backref='pathways')
species_id = Column(Integer, ForeignKey(f'{Species.__tablename__}.id'))
proteins = relationship(
'Protein',
secondary=protein_pathway,
backref='pathways',
)
class Protein(Base, CompathProteinMixin):
"""Genes Table."""
__tablename__ = PROTEIN_TABLE_NAME
id = Column(Integer, primary_key=True) # noqa:A003
kegg_id = Column(String(255), nullable=False, index=True, doc='KEGG id of the protein')
entrez_id = Column(String(255), nullable=False, index=True, doc='Entrez identifier')
uniprot_id = Column(String(255), doc='uniprot id of the protein (there could be more than one)')
hgnc_id = Column(String(255), doc='hgnc id of the protein')
hgnc_symbol = Column(String(255), doc='hgnc symbol of the protein')
def __repr__(self):
"""Return HGNC symbol."""
return f'Protein(kegg_id={self.kegg_id}, ' \
f'uniprot_id={self.uniprot_id}, hgnc_id={self.hgnc_id}, hgnc_symbol={self.hgnc_symbol})'
def __str__(self):
"""Return HGNC symbol."""
return str(self.hgnc_symbol)
def to_pybel(self) -> pybel.dsl.Protein:
"""Serialize to PyBEL node data dictionary."""
return pybel.dsl.Protein(
namespace=HGNC,
identifier=self.hgnc_id,
name=self.hgnc_symbol,
)
def get_uniprot_ids(self) -> Optional[List[str]]:
"""Return a list of uniprot ids."""
if not self.uniprot_id:
return None
return self.uniprot_id.split(" ")
``` |
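A small construction sketch for the ORM classes above (identifiers below are illustrative and not validated against KEGG/HGNC; the import path is inferred from the file location):
```python
from bio2bel_kegg.models import Pathway, Protein  # import path assumed from the file layout

pathway = Pathway(identifier='hsa04110', name='Cell cycle')     # hypothetical pathway
protein = Protein(kegg_id='hsa:595', entrez_id='595',
                  hgnc_id='1582', hgnc_symbol='CCND1')          # hypothetical protein
pathway.proteins.append(protein)   # populates the protein_pathway association table

print(protein)             # uses the __repr__ defined above
print(protein.to_pybel())  # pybel.dsl.Protein in the HGNC namespace
```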
{
"source": "jmarino/gudhi-devel",
"score": 3
} |
#### File: points/generator/aurelien_alvarez_surfaces_in_R8.py
```python
import numpy as np
import random
from math import factorial
"""This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2016 Université d'Orléans (France)
Modification(s):
- YYYY/MM Author: Description of the modification
"""
I = complex(0,1)
#################################################
#################################################
#Real surface with equation x.conj(y)^d + y.conj(z)^d + z.conj(x)^d = 0 in P2(C)
#Affine equation (z=1) multiplied by its conjugate (d = 2): x.conj(x)^2.y^4 + 2x^3.conj(x).y^2 + y + conj(x)^2 + x^5 = 0
def equationAffineSurfaceReelle(x):
polynome = [0]*(degre**2+1)
for k in range(degre+1):
polynome[k*degre] = (-1)**degre*x*factorial(degre)/(factorial(k)*factorial(degre-k))*x**(k*degre)*np.conjugate(x)**(degre-k)
polynome[-2] += 1
polynome[-1] += np.conjugate(x)**degre
return polynome
#################################################
#################################################
def calculRacines(equation,nombrePoints,module_x):
racines = [[1,0,0],[0,1,0],[0,0,1]]
for _ in range(nombrePoints):
x = module_x*(2*random.random()-1+I*(2*random.random()-1))
fool = [[[x,y,1],[y,1,x],[1,x,y]] for y in np.roots(equation(x)) if abs(x*np.conjugate(y)**degre+y+np.conjugate(x)**degre) < 0.0001]
for bar in fool:
racines += bar
return racines
#################################################
#################################################
def plongementDansR8(pointDansCP2):
z0 = pointDansCP2[0]
z1 = pointDansCP2[1]
z2 = pointDansCP2[2]
a = z0*np.conjugate(z0)
b = z1*np.conjugate(z1)
c = z2*np.conjugate(z2)
normeCarree = a+b+c
a = a/normeCarree
b = b/normeCarree
u = z0*np.conjugate(z1)/normeCarree
v = z0*np.conjugate(z2)/normeCarree
w = z1*np.conjugate(z2)/normeCarree
return [a.real,b.real,u.real,u.imag,v.real,v.imag,w.real,w.imag]
def plongementListeDansR8(listePointsDansCP2):
listePointsDansR8 = []
for point in listePointsDansCP2:
listePointsDansR8 += [plongementDansR8(point)]
return listePointsDansR8
#################################################
#################################################
degre = 3
nombrePoints = 10**4
module_x = 10
with open("surface.txt","w") as fichier:
bar = calculRacines(equationAffineSurfaceReelle,nombrePoints,module_x)
listePoints = plongementListeDansR8(bar)
fichier.write(str(len(bar)) + "\n")
for point in listePoints:
fichier.write(str(point[0]) + " " + str(point[1]) + " " + str(point[2]) + " " + str(point[3]) + " " + str(point[4]) + " " + str(point[5]) + " " + str(point[6]) + " " + str(point[7]) + "\n")
``` |
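Restating the embedding computed by `plongementDansR8` in mathematical notation (my notation, derived directly from the code): a projective point is normalized and mapped to eight real coordinates,
```latex
N = |z_0|^2 + |z_1|^2 + |z_2|^2, \qquad
\Phi\bigl([z_0:z_1:z_2]\bigr) =
\Bigl(\tfrac{|z_0|^2}{N},\ \tfrac{|z_1|^2}{N},\
\operatorname{Re}\tfrac{z_0\bar z_1}{N},\ \operatorname{Im}\tfrac{z_0\bar z_1}{N},\
\operatorname{Re}\tfrac{z_0\bar z_2}{N},\ \operatorname{Im}\tfrac{z_0\bar z_2}{N},\
\operatorname{Re}\tfrac{z_1\bar z_2}{N},\ \operatorname{Im}\tfrac{z_1\bar z_2}{N}\Bigr)
\in \mathbb{R}^8
```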
{
"source": "j-marjanovic/chisel-stuff",
"score": 3
} |
#### File: axi_proxy/drivers/run_single.py
```python
import argparse
import os
from AxiProxyTest import AxiProxyTest
from configs import configs
def main():
parser = argparse.ArgumentParser(description="Run a read/write test with AXI Proxy")
parser.add_argument(
"port", choices=configs.keys(), help="Specify Zynq MP port to use"
)
parser.add_argument("--override-axi-prot", type=int, help="Override AXI AxPROT")
parser.add_argument("--quirk-read-repeat", type=int, help="Perform repeated reads")
args = parser.parse_args()
config = configs[args.port]
if args.override_axi_prot is not None:
config.axi_conf["prot"] = args.override_axi_prot
test = AxiProxyTest(config)
test.test_hw_write_hw_read(args.quirk_read_repeat)
test.test_hw_write_sw_read(args.quirk_read_repeat)
test.test_sw_write_hw_read(args.quirk_read_repeat)
test.test_sw_write_sw_read(args.quirk_read_repeat)
print("Test succesfully finished")
if __name__ == "__main__":
main()
```
#### File: axi_traffic_gen/drivers/run_test.py
```python
import json
import logging
import sys
from AxiTrafficGen import AxiTrafficGen
from UioDev import get_uio_dev_file
from Udmabuf import Udmabuf
from configs import configs
class TrafficGenTest:
def __init__(self, config):
dev = get_uio_dev_file("AxiTrafficGen", search_note=config.port_name)
self.axi_tg = AxiTrafficGen(dev)
self.axi_tg.print_info()
if config.udmabuf_flags is not None:
print(f"udmabuf opened with extra flag = 0x{config.udmabuf_flags:x}")
self.udmabuf = Udmabuf("amba:udmabuf@0x0", extra_flags=config.udmabuf_flags)
assert self.udmabuf._get_value("dma_coherent") == 1
self.axi_tg.config_addr(self.udmabuf.phys_addr)
if config.axi_conf is not None:
print(f"AXI config = {config.axi_conf}")
self.axi_tg.config_axi(**config.axi_conf)
def run(self, size_burst):
self._clean_mem(size_burst)
self.axi_tg.config_len(size_burst)
self.axi_tg.start_write()
self.axi_tg.wait_write_done()
self.axi_tg.start_read()
self.axi_tg.wait_read_done()
self.axi_tg.done_clear()
stats = self.axi_tg.get_stats()
assert stats.rd_ok == size_burst * self.axi_tg.BURST_LEN_BEATS
self._check_mem(size_burst)
size_bytes = size_burst * self.axi_tg.BURST_LEN_BYTES
print(
f"{size_bytes:8} | rd = {stats.rd_cyc}, wr = {stats.wr_cyc} ",
flush=True,
)
return size_bytes, stats.rd_cyc, stats.wr_cyc
def _clean_mem(self, size_burst):
for byte_addr in range(size_burst * self.axi_tg.BURST_LEN_BYTES // 4):
self.udmabuf.wr32(byte_addr * 4, -1 & 0xFFFFFFFF)
def _check_mem(self, size_burst):
exp_val = [0] * self.axi_tg.BURST_LEN_BEATS
for burst_addr in range(size_burst):
for beat_offs in range(self.axi_tg.BURST_LEN_BEATS):
for byte_offs in range(0, self.axi_tg.BEAT_LEN_BYTES, 4):
byte_addr = (
burst_addr * self.axi_tg.BURST_LEN_BYTES
+ beat_offs * self.axi_tg.BEAT_LEN_BYTES
+ byte_offs
)
data = self.udmabuf.rd32(byte_addr)
assert data == exp_val[byte_offs // 4]
exp_val[0] += 1 # cascading not necessary
def main():
NR_MEAS_PER_SIZE = 5
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) != 2:
print(f"Usage: {sys.argv[0]} OUT_FILENAME")
return False
meas = dict()
for name, config in configs.items():
print(name)
test = TrafficGenTest(config)
sizes = [2 ** i for i in range(20)]
sizes_bytes = []
durs_rd = []
durs_wr = []
for size in sizes:
for _ in range(NR_MEAS_PER_SIZE):
size_bytes, dur_rd, dur_wr = test.run(size)
sizes_bytes.append(size_bytes)
durs_rd.append(dur_rd)
durs_wr.append(dur_wr)
meas[name] = dict()
meas[name]["size"] = sizes
meas[name]["rd"] = durs_rd
meas[name]["wr"] = durs_wr
print(meas)
with open(sys.argv[1], "w") as f:
json.dump(meas, f)
if __name__ == "__main__":
main()
``` |
{
"source": "JMark1991/Photo_Tagger",
"score": 3
} |
#### File: Photo_Tagger/image-similarity-clustering/cli.py
```python
import argparse
import sys
import os
from parse_data import parse_data
from features import extract_features
from tsne_reducer import tsne
from umap_reducer import umap
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('mode', help='extract | tsne | umap')
parser.add_argument('data', help='[features]: Filepath to an image or folder containing images to extract features from. [tsne/umap]: Filepath to a .csv file to read into a DataFrame. ')
parser.add_argument('out', help='Output filepath of operation')
parser.add_argument('--feature-cols', '-f', help='[tsne/umap]: Numerical data column indices to treat as features. Ex: "B,C,F", use "all" to consider all columns (excluding optional unique-col).')
parser.add_argument('--unique-col', '-u', help='[tsne/umap]: The column index containing unique IDs for each row (typically "ID" or "Name" column). Not required. Omitted from "all" feature-cols')
parser.add_argument('--reduce', '-r', help='[tsne/umap]: How many dimensions to reduce features to. Default is 2.', default='2')
parser.add_argument('--model', '-m', help='[features]: Which model to use. ResNet50 | Xception | VGG16 | VGG19 | InceptionV3 | MobileNet. Default: ResNet50', default='ResNet50')
args = parser.parse_args(argv[1:])
# === FEATURE EXTRACTION ===
# We expect an image filepath or folder of images
if args.mode == 'features':
assert os.path.exists(args.data),\
'Features mode (data arg): File or directory not found: "{}"'\
.format(args.data)
# Calculate and write to args.out
features = extract_features(args.data, model=args.model, write_to=args.out)
# === DIMENSION REDUCTION ===
# We expect a .csv file of features
elif args.mode in ['tsne', 'umap']:
# Make sure we know what columns are intended to be used numerically as a list of strings, or 'all'
feature_cols = args.feature_cols
if feature_cols is None:
raise Exception('Feature reduction mode: No data column indices provided. Example usage: "--feature-cols B,C,F", "--feature-cols all"')
elif feature_cols != 'all':
feature_cols = [s.strip() for s in feature_cols.split(',') if s.strip() != '']
# Parse the data into a squashed pd.DataFrame with first column being unique keys
df = parse_data(args.data, feature_cols, args.unique_col)
if args.mode == 'tsne':
tsne(df, dims=int(args.reduce), write_to=args.out)
elif args.mode == 'umap':
umap(df, write_to=args.out)
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
#### File: JMark1991/Photo_Tagger/predictor.py
```python
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
import pandas as pd
#X = pd.read_csv('test_sample.csv')
#X.drop('Unnamed: 0', axis=1, inplace=True)
def predict_location(X, model='Model_01.h5'):
# clean dataset
for i in range(0,2048):
X[str(i)] = pd.to_numeric(X[str(i)], errors='coerce')
X.dropna(axis=1, inplace=True)
if len(X) < 1:
print('Error')
return 'Error'
X.drop('ID', axis=1, inplace=True)
# load the model from disk
loaded_model = load_model(model)
#loaded_model.load_weights('NN_Models/test.hdf5')
# Generate predictions
predictions = loaded_model.predict(X)
print('Predictions: ', predictions, sep='\n', end='\n')
return predictions
#predictions = predict_location(X)
``` |
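A usage sketch matching the commented-out calls in the file above (file and model names are the placeholders used there; the CSV is assumed to contain the `ID` column and the 2048 feature columns the function expects):
```python
import pandas as pd
from predictor import predict_location  # assumes this module is importable

X = pd.read_csv('test_sample.csv')                        # placeholder feature table
X.drop('Unnamed: 0', axis=1, inplace=True, errors='ignore')
predictions = predict_location(X, model='Model_01.h5')    # default model name from the signature
```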
{
"source": "jmark/FMM3D",
"score": 2
} |
#### File: python/test/test_lfmm.py
```python
import fmm3dpy as fmm
import numpy as np
import numpy.linalg as la
def main():
test_lfmm()
def test_lfmm():
ntests = 36
testres = np.zeros(ntests)
#
# This is a testing code for making sure all the
# fmm routines are accessible through fmm3d.py
#
n = 2000
ntest = 10
zk = 1.1 + 1j*0
sources = np.random.uniform(0,1,(3,n))
stmp = sources[:,0:ntest]
nt = 1880
targ = np.random.uniform(0,1,(3,nt))
ttmp = targ[:,0:ntest]
eps = 10**(-5)
zk = 1.1 + 1j*0
charges = np.random.uniform(0,1,n)
dipvec = np.random.uniform(0,1,(3,n))
outex=fmm.Output()
itest = 0
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=1)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvec,pg=1)
out2 = fmm.l3ddir(sources=sources,targets=stmp,dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges, \
dipvec=dipvec,pg=1)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=1)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=2)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvec,pg=2)
out2 = fmm.l3ddir(sources=sources,targets=stmp,dipvec=dipvec, \
pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges, \
dipvec=dipvec,pg=2)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges,
dipvec=dipvec,pgt=2)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad")
itest=itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1)
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=1)
out2=fmm.l3ddir(sources=sources,targets=ttmp,\
dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipvec=dipvec,pgt=1)
out2=fmm.l3ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipvec=dipvec,pgt=1)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2)
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=2)
out2=fmm.l3ddir(sources=sources,targets=ttmp,\
dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipvec=dipvec,pgt=2)
out2 =fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,\
dipvec=dipvec,pgt=2)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=1,pg=1)
out2=fmm.l3ddir(sources=sources,targets=stmp, \
dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp, \
dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipvec=dipvec,pgt=1,pg=1)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=1)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
dipvec=dipvec,pgt=1)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=2,pg=2)
out2=fmm.l3ddir(sources=sources,targets=stmp, \
dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,dipvec=dipvec, \
pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipvec=dipvec,pgt=2,pg=2)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=2)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
dipvec=dipvec,pgt=2)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad")
nd = 2
charges = np.random.uniform(0,1,(nd,n))
dipvec = np.random.uniform(0,1,(nd,3,n))
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=1,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges, \
dipvec=dipvec,pg=1,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=1,nd=nd)
out2.pot = out2.pottarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges,pg=2,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,dipvec=dipvec, \
pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,charges=charges, \
dipvec=dipvec,pg=2,nd=nd)
out2 = fmm.l3ddir(sources=sources,targets=stmp,charges=charges,
dipvec=dipvec,pgt=2,nd=nd)
out2.pot = out2.pottarg
out2.grad = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pg=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources, charges and dipoles, pot and grad, vectorized")
itest=itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=ttmp,\
dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipvec=dipvec,pgt=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=ttmp, \
charges=charges, \
dipvec=dipvec,pgt=1,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=2,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=ttmp,\
dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipvec=dipvec,pgt=2,nd=nd)
out2 =fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,\
dipvec=dipvec,pgt=2,nd=nd)
err = fmm.comperr(ntest=ntest,out=out,outex=out2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to targets, charges and dipoles, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=1,pg=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp, \
dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp, \
dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ, \
charges=charges, \
dipvec=dipvec,pgt=1,pg=1,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=1,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
dipvec=dipvec,pgt=1,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=1,pgt=1,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,pgt=2,pg=2,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges, pot and grad, vectorized")
itest = itest+1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,\
dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp, \
dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,dipvec=dipvec, \
pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, dipoles, pot and grad, vectorized")
itest = itest + 1
out=fmm.lfmm3d(eps=eps,sources=sources,targets=targ,charges=charges,\
dipvec=dipvec,pgt=2,pg=2,nd=nd)
out2=fmm.l3ddir(sources=sources,targets=stmp,charges=charges, \
dipvec=dipvec,pgt=2,nd=nd)
outex.pot = out2.pottarg
outex.grad = out2.gradtarg
out2=fmm.l3ddir(sources=sources,targets=ttmp,charges=charges, \
dipvec=dipvec,pgt=2,nd=nd)
outex.pottarg = out2.pottarg
outex.gradtarg = out2.gradtarg
err = fmm.comperr(ntest=ntest,out=out,outex=outex,pg=2,pgt=2,nd=nd)
if(err<eps):
testres[itest] = 1
else:
print("Failed sources to sources and targets, charges and dipoles, pot and grad, vectorized")
if(sum(testres)==ntests):
print("all lfmm tests succeeded")
if __name__ == "__main__":
main()
``` |
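For a single, minimal call outside the test harness, the same pattern as the first charge-only case above can be used (problem size and tolerance are arbitrary here):
```python
import numpy as np
import fmm3dpy as fmm

n = 2000
sources = np.random.uniform(0, 1, (3, n))   # source locations in the unit cube
charges = np.random.uniform(0, 1, n)        # one charge per source

# Laplace FMM: potential evaluated at the sources themselves (pg=1), 5-digit tolerance.
out = fmm.lfmm3d(eps=1e-5, sources=sources, charges=charges, pg=1)
print(out.pot.shape)
```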
{
"source": "JMarkin/afbmq",
"score": 2
} |
#### File: afbmq/examples/echo_bot_webhook.py
```python
import asyncio
import logging
import config
from afbmq import FB
from afbmq.dispatcher import Dispatcher
from afbmq.types import Event, RecipientRequest, MessageRequest
from afbmq.types.message import Message
logging.basicConfig(level=logging.INFO)
loop = asyncio.get_event_loop()
fb = FB(confirmation_code=config.FB_CONFIRMATION_CODE,
access_token=config.FB_ACCESS_TOKEN,
loop=loop)
dp = Dispatcher(fb, loop=loop)
@dp.message_handler()
async def echo_handler(event: Event, message: Message):
await message.send_message(RecipientRequest(id=event.sender.id), message=MessageRequest(text=message.text))
async def shutdown(_):
await fb.close()
await asyncio.sleep(0.250)
if __name__ == '__main__':
from afbmq.utils.executor import start_webhook
start_webhook(dispatcher=dp, webhook_path=config.WEBHOOK_PATH,
host=config.WEBAPP_HOST, port=config.WEBAPP_PORT,
on_shutdown=shutdown)
``` |
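The example above imports a `config` module that is not included in this excerpt. A minimal sketch of what it could contain, assuming only the attribute names used in the script; every value is a placeholder, not a real credential:
```python
# config.py -- hypothetical settings module assumed by echo_bot_webhook.py;
# only the attribute names come from the example above, the values are placeholders.
FB_CONFIRMATION_CODE = 'my-verify-token'  # webhook verification token
FB_ACCESS_TOKEN = 'EAA...'                # page access token issued by Facebook
WEBHOOK_PATH = '/webhook'                 # path Facebook posts events to
WEBAPP_HOST = '0.0.0.0'                   # bind address of the local web server
WEBAPP_PORT = 8080                        # bind port of the local web server
```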
{
"source": "JMarkin/aVKapi",
"score": 3
} |
#### File: avkapi/methods/base.py
```python
import logging
logger = logging.getLogger(__name__)
class BaseMethod:
def __init__(self, session, access_token, api_version):
self._session = session
self._access_token = access_token
self._api_version = api_version
async def _api_request(self, method_name, parameters):
"""
        :param method_name: name of the VK API method to call
        :type method_name: str
        :param parameters: query parameters of the request
        :type parameters: dict
        :return: raw response body as text
"""
parameters['access_token'] = self._access_token
parameters['v'] = self._api_version
p = {k: v for k, v in parameters.items() if v is not None}
link = f'https://api.vk.com/method/{method_name}'
async with self._session.post(link, params=p) as resp:
status = resp.status
text = await resp.text()
logger.info(f'Response: {status}, {text}')
return text
```
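The `Messages` helper imported by `vk.py` below is not shown in this excerpt. A minimal sketch of how a subclass of `BaseMethod` could wrap `_api_request`, assuming the standard `messages.send` VK API method; it is illustrative only, not the library's actual implementation:
```python
from avkapi.methods.base import BaseMethod
# Hypothetical helper illustrating the BaseMethod pattern; not the real avkapi code.
class Messages(BaseMethod):
    async def send(self, peer_id, message, random_id=0):
        # 'messages.send' is the VK API method for sending a text message
        parameters = {
            'peer_id': peer_id,      # target user or chat id
            'message': message,      # message text
            'random_id': random_id,  # deduplication id expected by VK
        }
        return await self._api_request('messages.send', parameters)
```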
#### File: aVKapi/avkapi/vk.py
```python
import asyncio
import logging
from aiohttp import ClientSession
from .methods import Messages
from .utils import json
logger = logging.getLogger(__name__)
API_URL = 'https://api.vk.com/method/'
class VK:
def __init__(self, confirmation_code=None, secret_key=None, access_token=None, loop=None):
"""
:type confirmation_code: str
"""
self.confirmation_code = confirmation_code
self.secret_key = secret_key
self.access_token = access_token
self.api_version = '5.80'
# asyncio loop instance
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
self._session = ClientSession(loop=self.loop, json_serialize=json.dumps)
self.messages = Messages(access_token=access_token, session=self._session, api_version=self.api_version)
async def get_session(self):
if not self._session:
self._session = ClientSession()
async def api_request(self, method_name, parameters):
link = f'{API_URL}{method_name}?{parameters}'
params = {
'access_token': self.access_token,
'v': self.api_version
}
params.update(**parameters)
async with self._session.post(link) as resp:
status = resp.status
text = await resp.text()
logger.info(f'Response: {status}, {text}')
async def close(self):
if isinstance(self._session, ClientSession) and not self._session.closed:
await self._session.close()
``` |
{
"source": "JMarkin/simple-proxy",
"score": 2
} |
#### File: simple-proxy/app/aggregate.py
```python
import json
import emojis as emoji
import esprima
from bs4 import BeautifulSoup
from app.deep_find import gen_dict_extract
blacklist = [
"[document]",
"noscript",
"header",
"html",
"meta",
"head",
"input",
"script",
"style",
]
class AggregateHtml:
    # process an HTML document
def __init__(self, raw, emojis):
self.soup = BeautifulSoup(raw.decode("utf-8"), "lxml")
self._emojis = emojis
self.remove = []
def add_emoji(self, match):
emoj = next(self._emojis)
return "%s%s" % (match.group(1), emoj)
def change_str(self, t):
new_s = []
for i in t.split():
emoj = ""
if len(i) == 6:
emoj = next(self._emojis)
new_s.append(f"{i}{emoj}")
return emoji.encode(" ".join(new_s))
def change_str_dict(self, d, k):
d[k] = self.change_str(d[k])
def aggregate(self):
        # find and modify every text node in the HTML
for t in self.soup.find_all(text=True):
if str(t) == "\n" or str(t) == " ":
continue
if t.parent.name in blacklist:
continue
t.parent.append(self.change_str(t))
self.remove.append(t)
for t in self.remove:
t.extract()
        # the site is built on nuxtjs, so a __NUXT__ object sits at the bottom
        # of the page listing the objects and data that get filled in later
script = {}
txt = None
for s in self.soup.find_all("script"):
if s.string and s.string.find("__NUXT__") >= 0:
                # find the script and convert it into a dict
script = esprima.toDict(esprima.parseScript(s.string))
txt = s
break
if not txt:
return
        # replace every value we can find
for old, new in gen_dict_extract(
"value", script, self.change_str_dict
):
if old != new:
txt.string = txt.string.replace(old, new)
class AggregateJson(AggregateHtml):
    # process JSON payloads
def __init__(self, raw, emojis):
self.json = json.loads(raw.decode("utf-8"))
self._emojis = emojis
self.remove = []
def aggregate(self):
        # there are many kinds of requests, so simply walk over all known keys
list(gen_dict_extract("title", self.json, self.change_str_dict))
list(gen_dict_extract("post_title", self.json, self.change_str_dict))
list(gen_dict_extract("content", self.json, self.change_str_dict))
list(gen_dict_extract("subtitle", self.json, self.change_str_dict))
```
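A minimal usage sketch for `AggregateHtml`, assuming the module path `app.aggregate` from the file header above. The class calls `next()` on its `emojis` argument, so the codes are wrapped in `itertools.cycle`; the HTML snippet is made up:
```python
import itertools
from app.aggregate import AggregateHtml
# endless emoji iterator, because aggregate() draws one emoji per six-letter word
emojis = itertools.cycle([':thumbs_up:', ':sunny:'])
raw = b'<html><body><p>uborka kvartir segodnya</p></body></html>'  # "uborka" has six letters
aggr = AggregateHtml(raw, emojis)
aggr.aggregate()              # decorates matching words and encodes the emoji shortcodes
print(aggr.soup.prettify())   # serialized, modified HTML
```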
#### File: simple-proxy/tests/test_clean_html.py
```python
import itertools
import os
import pytest
from app.utils.accept_tags import AggregateHtml
@pytest.fixture
def aggr():
with open(
os.path.join(os.path.dirname(__file__), "files", "uborka.html"), "rb"
) as f:
raw = f.read()
    aggr = AggregateHtml(raw, itertools.cycle([":thumbs_up:", ":sunny:"]))  # the class calls next() on this argument
return aggr
def test_clean_html(aggr):
aggr.aggregate()
with open(
os.path.join(os.path.dirname(__file__), "files", "new_uborka.html"),
"w",
) as f:
f.write(str(aggr.soup))
``` |
{
"source": "jmarkloew/pyroSAR",
"score": 2
} |
#### File: snap/S1_SLC/S1_InSAR_coh_proc_FUN.py
```python
import pyroSAR
from pyroSAR.snap.auxil import parse_recipe, parse_node, gpt, execute
import os
import shutil
import glob
import datetime
def S1_InSAR_coh_proc(infiles, out_dir="default", tmpdir=None, t_res=20, t_crs=32633, out_format="GeoTIFF",
gpt_paras=None, pol='full', IWs=["IW1", "IW2", "IW3"], ext_DEM=False, ext_DEM_noDatVal=-9999,
ext_Dem_file=None, msk_noDatVal=False, ext_DEM_EGM=True, BGC_demResamp="BICUBIC_INTERPOLATION",
TC_demResamp="BILINEAR_INTERPOLATION", osvPath=None, cohWinRg=11, cohWinAz=3, ml_RgLook=4,
ml_AzLook=1, firstBurstIndex=None, lastBurstIndex=None, clean_tmpdir=True):
"""
function for processing InSAR coherences from S-1 SLC files in SNAP
Parameters
----------
infiles: list or str
filepaths of SLC zip files
out_dir: str or None
        output folder; if None, a default folder structure "COH/pol/" is created
tmpdir: str
        temporary directory for intermediate processing steps; automatically created in the cwd if none is provided
t_res: int, float
resolution in meters of final product, default is 20
t_crs: int
        EPSG code of the target coordinate system, default is 32633
out_format: str
format of final output, formats supported by SNAP, default is GeoTiff
gpt_paras: none or list
a list of additional arguments to be passed to the gpt call
pol: str or list or "full"
        polarisations to process, "full" processes all available polarizations, default is "full"
IWs: str or list
selected subswath for processing, default is all 3
    ext_DEM: bool
        set to True if an external DEM should be used in processing
    ext_DEM_noDatVal: int or float
        no-data value of the external DEM, default is -9999
    ext_Dem_file: str
        path to file of external DEM, must be a format that SNAP can handle
    msk_noDatVal: bool
if true No data values of DEM, especially at sea, are masked out
ext_DEM_EGM: bool
apply earth gravitational model to external DEM, default true
imgResamp: str
image resampling method, must be supported by SNAP
demResamp: str
DEM resampling method, must be supported by SNAP
    BGC_demResamp: str
resampling algorithm of Back Geo-Coding
    TC_demResamp: str
resampling algorithm of terrain correction
cohWinRg: int
size of moving window for coherence estimation in range, default is 11
cohWinAz: int
size of moving window for coherence estimation in azimuth, default is 3
ml_RgLook: int
number of looks in range, default is 4
ml_AzLook: int
number of looks in azimuth, default is 1
    clean_tmpdir: bool
delete tmpdir, default true
    osvPath: str or None
specify path to locally stored OSVs, if none default OSV path of SNAP is set
Returns
-------
    Raster files of the selected output format containing the estimated coherences
Examples
--------
    compute InSAR coherences for the two acquisition dates of the given SLC file pair
    >>> from pyroSAR.snap import S1_InSAR_coh_proc
    >>> filenames= ['S1B_IW_SLC__1SDV_20201229T170010_20201229T170037_024920_02F722_8B6C.zip', 'S1B_IW_SLC__1SDV_20201217T170011_20201217T170038_024745_02F172_1D38.zip']
    >>> gpt_paras = ["-e", "-x", "-c","35G", "-q", "16", "-J-Xms25G", "-J-Xmx75G"]
    >>> pol= "full"
    >>> S1_InSAR_coh_proc(infiles=filenames, gpt_paras=gpt_paras, pol="full")
"""
##define formatName for reading zip-files
formatName = "SENTINEL-1"
##list of abbreviated month for creation of source Bands string
month_list = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
##specify tmp output format
tpm_format = "BEAM-DIMAP"
## create temp dir for intermediate .dim files
if tmpdir is None:
tmpdir = os.path.join(os.getcwd(), "tmp_dim")
if os.path.isdir(tmpdir) == False:
os.mkdir(tmpdir)
    ##check if at least two files are loaded for coherence estimation
if len(infiles) == 1:
raise RuntimeError("At least 2 scenes needed for coherence estimation")
##check if a single IW or consecutive IWs are selected
if isinstance(IWs, str):
IWs = [IWs]
if sorted(IWs) == ["IW1", "IW3"]:
raise RuntimeError("Please select single or consecutive IW")
##extract info about files and order them by date
info = pyroSAR.identify_many(infiles, verbose=False, sortkey='start')
##collect filepaths sorted by date
fps_lst = []
for fp in info:
fp_str = fp.scene
fps_lst.append(fp_str)
##check if all files are of the same relative orbit
relOrbs = []
for o in info:
orb = o.orbitNumber_rel
relOrbs.append(orb)
query_orb = relOrbs.count(relOrbs[0]) == len(relOrbs)
##raise error if different rel. orbits are detected
if query_orb == False:
raise RuntimeError("Files of different relative orbits detected")
##query and handle polarisations, raise error if selected polarisations don't match (see Truckenbrodt et al.: pyroSAR: geocode)
info_ms = info[0]
orbit = info_ms.orbit
if isinstance(pol, str):
if pol == 'full':
pol = info_ms.polarizations
else:
if pol in info_ms.polarizations:
pol = [pol]
else:
raise RuntimeError('polarization {} does not exists in the source product'.format(pol))
elif isinstance(pol, list):
pol = [x for x in pol if x in info_ms.polarizations]
else:
raise RuntimeError('polarizations must be of type str or list')
##specify auto download DEM and handle external DEM file
if ext_DEM == False:
demName = 'SRTM 1Sec HGT'
ext_DEM_file = None
else:
demName = "External DEM"
##raise error if no path to external file is provided
    if ext_DEM == True and ext_Dem_file is None:
raise RuntimeError('No DEM file provided. Specify path to DEM-file')
    ##handle SNAP problem with WGS84 (EPSG: 4326) by manually constructing crs string (see Truckenbrodt et al.: pyroSAR: geocode)
if t_crs == 4326:
epsg = 'GEOGCS["WGS84(DD)",''DATUM["WGS84",''SPHEROID["WGS84", 6378137.0, 298.257223563]],''PRIMEM["Greenwich", 0.0],''UNIT["degree", 0.017453292519943295],''AXIS["Geodetic longitude", EAST],' 'AXIS["Geodetic latitude", NORTH]]'
else:
epsg = "EPSG:{}".format(t_crs)
##check if correct DEM resampling methods are supplied
reSamp_LookUp = ['NEAREST_NEIGHBOUR',
'BILINEAR_INTERPOLATION',
'CUBIC_CONVOLUTION',
'BISINC_5_POINT_INTERPOLATION',
'BISINC_11_POINT_INTERPOLATION',
'BISINC_21_POINT_INTERPOLATION',
'BICUBIC_INTERPOLATION']
message = '{0} must be one of the following:\n- {1}'
if BGC_demResamp not in reSamp_LookUp:
raise ValueError(message.format('demResamplingMethod', '\n- '.join(reSamp_LookUp)))
if TC_demResamp not in reSamp_LookUp:
raise ValueError(message.format('imgResamplingMethod', '\n- '.join(reSamp_LookUp)))
##query unique dates of files: selection of paired images for coherence estimation
dates_info = []
for d in info:
di = d.start.split("T")[0]
dates_info.append(di)
unique_dates_info = list(set(dates_info))
unique_dates_info = sorted(unique_dates_info, key=lambda x: datetime.datetime.strptime(x, '%Y%m%d'))
##raise error if only one unique date is supplied
if len(unique_dates_info) == 1:
raise RuntimeError("Please supply images from 2 different dates")
##check for files of the same date and put them in separate lists
pair_dates_idx = []
for a in unique_dates_info:
tmp_dates = []
for idx, elem in enumerate(dates_info):
if (a == elem):
tmp_dates.append(idx)
pair_dates_idx.append(tmp_dates)
##selection of paired files for coherence estimation
for i in range(0, len(pair_dates_idx) - 1):
fps1 = list(map(fps_lst.__getitem__, pair_dates_idx[i]))
fps2 = list(map(fps_lst.__getitem__, pair_dates_idx[i + 1]))
fps_paired = [fps1, fps2]
info_lst = [pyroSAR.identify(fps1[0]), pyroSAR.identify(fps2[0])]
##check availability of orbit state vector file
orbitType = "Sentinel Precise (Auto Download)"
match = info_lst[0].getOSV(osvType='POE', returnMatch=True, osvdir=osvPath)
match2 = info_lst[1].getOSV(osvType='POE', returnMatch=True, osvdir=osvPath)
if match is None or match2 is None:
info_lst[0].getOSV(osvType='RES', osvdir=osvPath)
info_lst[1].getOSV(osvType='RES', osvdir=osvPath)
orbitType = 'Sentinel Restituted (Auto Download)'
##build sourceBands string for coherence estimation
dates = []
for i in info_lst:
date = i.start.split("T")[0]
date_int = int(date[4:6])
month = month_list[date_int - 1]
date_tmp = date[6:8] + month + date[0:4]
dates.append(date_tmp)
##extract dates as str from filename for the day and the full datetime
date1 = info_lst[0].start.split("T")[0]
date2 = info_lst[1].start.split("T")[0]
datetime1 = info_lst[0].start
datetime2 = info_lst[1].start
##exception handling against SNAP errors
try:
date_uniq = [date1, date2]
##manage numbers of scenes needed per time step to estimate coherence, initiate sliceAssembly if necessary
if len(fps1) == 1 and len(fps2) == 1:
slcAs_fps_slv = fps1[0]
slcAs_fps_ms = fps2[0]
else:
if len(fps1) == 1 and len(fps2) > 1:
slcAs_fps_slv = fps1[0]
idx_start = 1
idx_stop = len(fps_paired)
elif len(fps1) > 1 and len(fps2) == 1:
slcAs_fps_ms = fps2[0]
idx_start = 0
idx_stop = len(fps_paired) - 1
else:
idx_start = 0
idx_stop = len(fps_paired)
## initiate sliceAssembly where the time step consists of more than one scene
for fp in range(idx_start, idx_stop):
if fp == 0:
slcAs_name = "S1_relOrb_" + str(relOrbs[0]) + "_COH_" + date_uniq[fp] + "_SLC_slv"
slcAs_out = os.path.join(tmpdir, slcAs_name)
else:
slcAs_name = "S1_relOrb_" + str(relOrbs[0]) + "_COH_" + date_uniq[fp] + "_SLC_ms"
slcAs_out = os.path.join(tmpdir, slcAs_name)
workflow_slcAs = parse_recipe("blank")
read1 = parse_node('Read')
read1.parameters['file'] = fps_paired[fp][0]
read1.parameters['formatName'] = formatName
readers = [read1.id]
workflow_slcAs.insert_node(read1)
for r in range(1, len(fps_paired[fp])):
readn = parse_node('Read')
readn.parameters['file'] = fps_paired[fp][r]
readn.parameters['formatName'] = formatName
workflow_slcAs.insert_node(readn, before=read1.id, resetSuccessorSource=False)
readers.append(readn.id)
slcAs = parse_node("SliceAssembly")
slcAs.parameters["selectedPolarisations"] = pol
workflow_slcAs.insert_node(slcAs, before=readers)
read1 = slcAs
write_slcAs = parse_node("Write")
write_slcAs.parameters["file"] = slcAs_out
write_slcAs.parameters["formatName"] = tpm_format
workflow_slcAs.insert_node(write_slcAs, before=slcAs.id)
workflow_slcAs.write("Coh_slc_prep_graph")
gpt('Coh_slc_prep_graph.xml', gpt_args=gpt_paras, outdir=tmpdir)
###import sliceAssemblies according to how many files per time step are needed
if len(fps1) > 1 and len(fps2) == 1:
slcAs_fps_slv = glob.glob(os.path.join(tmpdir, "*" + "_SLC_slv.dim"))
elif len(fps1) == 1 and len(fps2) > 1:
slcAs_fps_ms = glob.glob(os.path.join(tmpdir, "*" + "_SLC_ms.dim"))
elif len(fps1) > 1 and len(fps2) > 1:
slcAs_fps_slv = glob.glob(os.path.join(tmpdir, "*" + "_SLC_slv.dim"))
slcAs_fps_ms = glob.glob(os.path.join(tmpdir, "*" + "_SLC_ms.dim"))
##start coherence estimation for each IW
for p in pol:
for iw in IWs:
# my_source = "coh_"+ iw + "_"+ p+ "_"+ dates[1] +"_"+ dates[0]
##create out_name
out_name = "S1_relOrb_" + str(
relOrbs[0]) + "_" + iw + "_COH_" + p + "_" + date2 + "_" + date1 + "_TPD"
tmp_out = os.path.join(tmpdir, out_name)
##parse_workflows
##coherence calculation per IW
workflow_coh = parse_recipe("blank")
read1 = parse_node("Read")
read1.parameters["file"] = slcAs_fps_ms
if len(fps2) == 1:
read1.parameters["formatName"] = formatName
workflow_coh.insert_node(read1)
aof = parse_node("Apply-Orbit-File")
aof.parameters["orbitType"] = orbitType
aof.parameters["polyDegree"] = 3
aof.parameters["continueOnFail"] = False
workflow_coh.insert_node(aof, before=read1.id)
ts = parse_node("TOPSAR-Split")
ts.parameters["subswath"] = iw
ts.parameters["selectedPolarisations"] = p
# ts.parameters["firstBurstIndex"]= burst_span1[0]
# ts.parameters["lastBurstIndex"]= burst_span1[1]
workflow_coh.insert_node(ts, before=aof.id)
read2 = parse_node('Read')
read2.parameters['file'] = slcAs_fps_slv
if len(fps1) == 1:
read2.parameters['formatName'] = formatName
workflow_coh.insert_node(read2)
aof2 = parse_node("Apply-Orbit-File")
aof2.parameters[
"orbitType"] = orbitType # 'Sentinel Restituted (Auto Download)' Sentinel Precise (Auto Download)
aof2.parameters["polyDegree"] = 3
aof2.parameters["continueOnFail"] = False
workflow_coh.insert_node(aof2, before=read2.id)
ts2 = parse_node("TOPSAR-Split")
ts2.parameters["subswath"] = iw
ts2.parameters["selectedPolarisations"] = p
# ts2.parameters["firstBurstIndex"]= burst_span2[0]
# ts2.parameters["lastBurstIndex"]= burst_span2[1]
workflow_coh.insert_node(ts2, before=aof2.id)
bgc = parse_node("Back-Geocoding")
bgc.parameters["demName"] = demName
bgc.parameters["demResamplingMethod"] = BGC_demResamp
bgc.parameters["externalDEMFile"] = ext_Dem_file
bgc.parameters["externalDEMNoDataValue"] = ext_DEM_noDatVal
bgc.parameters["resamplingType"] = "BISINC_5_POINT_INTERPOLATION"
bgc.parameters["maskOutAreaWithoutElevation"] = msk_noDatVal
workflow_coh.insert_node(bgc, before=[ts.id, ts2.id])
coh = parse_node("Coherence")
coh.parameters["subtractFlatEarthPhase"] = True
coh.parameters["singleMaster"] = True
coh.parameters["cohWinRg"] = cohWinRg
coh.parameters["cohWinAz"] = cohWinAz
coh.parameters["demName"] = demName
coh.parameters["subtractTopographicPhase"] = True
coh.parameters["externalDEMFile"] = ext_Dem_file
coh.parameters["externalDEMNoDataValue"] = ext_DEM_noDatVal
coh.parameters["externalDEMApplyEGM"] = True
workflow_coh.insert_node(coh, before=bgc.id)
tpd = parse_node("TOPSAR-Deburst")
tpd.parameters["selectedPolarisations"] = p
workflow_coh.insert_node(tpd, before=coh.id)
write_coh = parse_node("Write")
write_coh.parameters["file"] = tmp_out
write_coh.parameters["formatName"] = tpm_format
workflow_coh.insert_node(write_coh, before=tpd.id)
##write graph
workflow_coh.write("Coh_tmp_prep_graph")
##execute graph via gpt
execute('Coh_tmp_prep_graph.xml', gpt_args=gpt_paras)
###combining the IWs
##filepaths of temporary files
# search_criteria = "S1_relOrb_"+ str(info[0].orbitNumber_rel)+ "*"+p +"_"+ date2+"_"+ date1+"_TPD.dim"
# dirpath= os.getcwd()
# q = os.path.join(dirpath, search_criteria)
tmp_fps = glob.glob(os.path.join(
tmpdir , "S1_relOrb_" + str(relOrbs[0]) + "*" + p + "_" + date2 + "_" + date1 + "_TPD.dim"))
if len(IWs) == 1:
tpm_source = "coh_" + IWs[0] + "_" + p + "_" + dates[1] + "_" + dates[0]
else:
tpm_source = "coh_" + p + "_" + dates[1] + "_" + dates[0]
##create outputname based on the number of selected IWs
if len(IWs) == 3:
tpm_name = "S1_" + orbit + "_relOrb_" + str(
relOrbs[0]) + "_COH_" + p + "_" + datetime2 + "_" + datetime1
else:
separator = "_"
iw_str = separator.join(IWs)
tpm_name = "S1_" + orbit + "_relOrb_" + str(
relOrbs[0]) + "_COH_" + iw_str + "_" + p + "_" + datetime2 + "_" + datetime1
##create default output folder based on selected polarizations
if out_dir is None:
out_dir_p = os.path.join("COH",p)
if os.path.isdir(out_dir_p) == False:
os.makedirs(os.path.join(os.getcwd(), out_dir_p))
elif os.path.isdir(out_dir):
                    out_dir_p = out_dir
else:
raise RuntimeError("Please provide a valid filepath")
final_out_fp = os.path.join(out_dir_p, tpm_name)
##create workflow for merging
workflow_tpm = parse_recipe("blank")
read1 = parse_node('Read')
read1.parameters['file'] = tmp_fps[0]
workflow_tpm.insert_node(read1)
##handling multiple vs single IW
if len(tmp_fps) > 1:
readers = [read1.id]
for t in range(1, len(tmp_fps)):
readn = parse_node('Read')
readn.parameters['file'] = tmp_fps[t]
workflow_tpm.insert_node(readn, before=read1.id, resetSuccessorSource=False)
readers.append(readn.id)
tpm = parse_node("TOPSAR-Merge")
tpm.parameters["selectedPolarisations"] = p
workflow_tpm.insert_node(tpm, before=readers)
last_id = tpm.id
else:
last_id = read1.id
##multi looking for either one IW or multiple ones
ml = parse_node("Multilook")
ml.parameters["sourceBands"] = tpm_source
ml.parameters["nRgLooks"] = ml_RgLook
ml.parameters["nAzLooks"] = ml_AzLook
ml.parameters["grSquarePixel"] = True
ml.parameters["outputIntensity"] = False
workflow_tpm.insert_node(ml, before=last_id)
tc = parse_node("Terrain-Correction")
tc.parameters["sourceBands"] = tpm_source
tc.parameters["demName"] = demName
tc.parameters["externalDEMFile"] = ext_Dem_file
tc.parameters["externalDEMNoDataValue"] = ext_DEM_noDatVal
tc.parameters["externalDEMApplyEGM"] = ext_DEM_EGM
tc.parameters["demResamplingMethod"] = TC_demResamp
tc.parameters["imgResamplingMethod"] = TC_demResamp
tc.parameters["pixelSpacingInMeter"] = t_res
tc.parameters["mapProjection"] = t_crs
tc.parameters["saveSelectedSourceBand"] = True
tc.parameters["outputComplex"] = False
tc.parameters["nodataValueAtSea"] = msk_noDatVal
workflow_tpm.insert_node(tc, before=ml.id)
write_tpm = parse_node("Write")
write_tpm.parameters["file"] = final_out_fp
write_tpm.parameters["formatName"] = out_format
workflow_tpm.insert_node(write_tpm, before=tc.id)
##write graph and execute graph
workflow_tpm.write("Coh_TPM_continued_proc_graph")
execute('Coh_TPM_continued_proc_graph.xml', gpt_args=gpt_paras)
# exception for SNAP errors & creating error log
except RuntimeError as e:
print(str(e))
with open("S1_COH_proc_ERROR_" + datetime1 + "_" + datetime2 + ".log", "w") as logf:
logf.write(str(e))
##clean tmp folder to avoid overwriting errors even if exception is valid
files = glob.glob(os.path.join(tmpdir,'*'))
for f in files:
if os.path.isfile(f) or os.path.islink(f):
os.unlink(f)
elif os.path.isdir(f):
shutil.rmtree(f)
continue
##clean tmp folder to avoid overwriting errors
files = glob.glob(os.path.join(tmpdir,'*'))
for f in files:
if os.path.isfile(f) or os.path.islink(f):
os.unlink(f)
elif os.path.isdir(f):
shutil.rmtree(f)
if clean_tmpdir == True:
shutil.rmtree(tmpdir)
``` |
{
"source": "jmarkowski/webapp-template",
"score": 3
} |
#### File: application/tests/test-webui-basics.py
```python
import unittest
from flask import current_app
from webui import create_app
class TestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing', logger=__name__)
        # Propagate exceptions to the test client
self.app.testing = True
# Create the test client
self.client = self.app.test_client()
# Bind the application context to the current context.
self.app_context = self.app.app_context()
self.app_context.push()
def tearDown(self):
self.app_context.pop()
def test_app_exists(self):
self.assertFalse(current_app is None)
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
if __name__ == '__main__':
unittest.main()
```
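The test imports `create_app` from a `webui` package that is not part of this excerpt. A minimal sketch of an application factory that would satisfy the calls above (`create_app('testing', logger=__name__)` and the `TESTING` config check), assuming a dict-based config registry; a real project would also register blueprints and extensions here:
```python
# webui/__init__.py -- hypothetical minimal factory matching the test above
from flask import Flask
config = {
    'default': {'TESTING': False},
    'testing': {'TESTING': True},
}
def create_app(config_name='default', logger=None):
    app = Flask(__name__)
    app.config.update(config[config_name])  # apply the named configuration
    if logger is not None:
        app.logger.name = logger            # tag log records with the caller's name
    return app
```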
#### File: application/util/__init__.py
```python
import sys
def abort(msg):
sys.exit(f'ABORT: {msg}')
``` |
{
"source": "jmark/turbubox",
"score": 2
} |
#### File: sandbox/bin/teardrop-flash.py
```python
import os, sys, pickle
import numpy as np
# jmark
import flash, ulz, dslopts
from defer_signals import DeferSignals
from matplotlib import pylab as plt
with dslopts.Manager(scope=globals(),appendix="flashfiles are be defined after '--'.") as mgr:
mgr.add('sinkfptmpl', 'path template to store the pickle files: <dir>/03d%.pickle', str, '')
mgr.add('usemultiproc', 'enable multiprocessing', dslopts.bool, True)
mgr.add('skipfiles', 'skip already existing files', dslopts.bool, False)
def log(msg):
print(msg, file=sys.stderr)
def pdf(data, nbins=10000):
return np.histogram(data, bins=nbins, density=True)
def task(taskid, srcfp):
# prepare sink file path
try:
snkfp = sinkfptmpl % taskid
except TypeError:
snkfp = sinkfptmpl
# skip already done files
if skipfiles and os.path.isfile(snkfp):
log('%s skipped.' % snkfp)
return
# open flash file
fls = flash.File(srcfp, 'r')
# ndarrays
velx = fls.data('velx')
vely = fls.data('vely')
velz = fls.data('velz')
# scalars
time = fls.realscalars['time']
step = fls.integerscalars['nstep']
Q = ulz.Q(velx,vely,velz).ravel()
R = ulz.R(velx,vely,velz).ravel()
nbins = 200 # number of bins in each dimension
binrange = [[-0.1, 0.1], [-0.2, 0.1]]
pdf, xbins, ybins = np.histogram2d(R,Q, bins=nbins, range=binrange)
pdf /= pdf.sum()
#pdf = np.log10(pdf)
# i0 = (np.abs(xbins + 1)).argmin()
# i1 = (np.abs(xbins - 1)).argmin()
# j0 = (np.abs(ybins + 1)).argmin()
# j1 = (np.abs(ybins - 1)).argmin()
#print(i0,i1,j0,j1)
#print(xbins.min(), xbins.max())
#print(ybins.min(), ybins.max())
#pdf = pdf[i0:i1,j0:j1]
plt.figure(figsize=(10,10))
#plt.imshow(pdf, origin='lower right')
plt.imshow(pdf)
plt.show()
log(snkfp)
srcfiles = map(str.rstrip, ARGV_TAIL)
if usemultiproc:
from multiprocessing import Pool
def _task(x):
return task(x[0],x[1])
Pool().map(_task,enumerate(srcfiles))
else:
for taskid, fp in enumerate(srcfiles):
task(taskid, fp)
```
#### File: plot/batch/pws.py
```python
import pickle
import argparse
from pathlib import Path
from collections import namedtuple
from decayturb import *
import box
import re
import scipy.stats
import numpy as np
from matplotlib import pylab as plt
## ------------------------------------------------------------------------- #
pp = argparse.ArgumentParser(description = 'Plotting Powerspectrum')
pp.add_argument(
'--setup',
help='task identifier',
type=str.lower, #required=True,
default='default',
)
pp.add_argument(
'--pickle',
help='path of pickle file',
type=Path, required=True,
)
pp.add_argument(
'--output',
help='path of png file',
type=str, required=True,
)
def parseSlice(argstr):
mm = re.match('(\d+):(\d+)', argstr)
if mm:
a,b = mm.groups()
return slice(int(a),int(b))
else:
a = int(argstr)
return slice(a,a+1)
pp.add_argument(
'--index',
help='set index',
type=parseSlice, #required=True,
)
pp.add_argument(
'--key',
help='keyword method: pws pws2 pws...',
type=str, #required=True,
)
pp.add_argument(
'--subkey',
help='keyword: dens rmsv ekin',
type=str, #required=True,
)
pp.add_argument('--ylabel')
pp.add_argument('--title')
pp.add_argument(
'--xrange',
type=lambda arg: tuple(float(x) for x in arg.split(':')),
#default=(-4,4),
)
pp.add_argument(
'--yrange',
type=lambda arg: tuple(float(x) for x in arg.split(':')),
#default=(-7,2),
)
pp.add_argument(
'--fit', action='store_true',
)
ARGV = pp.parse_args()
with open(ARGV.pickle, 'rb') as fh:
runs = box.Box(pickle.load(fh))
## ------------------------------------------------------------------------- #
## Set custom configurations
# setting default values
output = ARGV.output
index = ARGV.index
key = ARGV.key
subkey = ARGV.subkey
xrange = ARGV.xrange
yrange = ARGV.yrange
title = '%s/%s powerspectra at Dynamic Time: t_d = %.2f' % (
key, subkey,
runs.order[0].anal['scalars']['dyntime'][index][0])
xlabelBot = r'log. scale spatial wave number log$_{10}(k)$'
xlabelTop = r'spatial wave number $k$'
ylabel = 'log. scale FFT[f(k)]'
fit = ARGV.fit
dyntime = runs.order[0].anal['scalars']['dyntime'][index][0]
if False:
pass
elif ARGV.setup == 'mass-weighted/velocity':
key = '<KEY>'
subkey = 'rmsv'
ylabel = r'log. scale Fourier transformed velocity log$_{10}(\hat{u})$'
title = r'Shell-averaged Powerspectra of Three-dimensional Mass-weighted' \
+ r' Velocity Field at Dynamic Time $t_d$ = %.1f' % (dyntime)
elif ARGV.setup == 'volume-weighted/velocity':
key = '<KEY>'
subkey = 'rmsv'
ylabel = r'log. scale Fourier transformed velocity log$_{10}(\hat{u})$'
title = r'Shell-averaged Powerspectra of Three-dimensional Volume-weighted' \
+ r' Velocity Field at Dynamic Time $t_d$ = %.1f' % (dyntime)
elif ARGV.setup == 'volume-weighted/density':
key = 'pws2'
subkey = 'dens'
ylabel = r'log. scale Fourier transformed density log$_{10}(\hat{\rho})$'
title = r'Shell-averaged Powerspectra of Volume-weighted' \
+ r' Density Field at Dynamic Time $t_d$ = %.1f' % (dyntime)
elif ARGV.setup == 'volume-weighted/ekin':
key = 'pws2'
subkey = 'ekin'
ylabel = r'log. scale Fourier transformed kinetic energy log$_{10}(\hat{\mathcal{K}})$'
title = r'Shell-averaged Powerspectra of Volume-weighted' \
+ r' Kinetic Energy Field at Dynamic Time $t_d$ = %.1f' % (dyntime)
else:
raise NotImplementedError("Setup '%s' is unknown." % ARGV.setup)
## ------------------------------------------------------------------------- #
## Setup figure
fig = plt.figure(figsize=(12,6))
axB = fig.add_subplot(111)
if yrange:
plt.ylim(*yrange)
plt.ylabel(ylabel)
plt.title(title, y=1.1)
## ------------------------------------------------------------------------- #
Xs,Ys = list(),list()
ii = index
for run in runs.order:
df = run.anal[key][subkey][ii]
# take averaged area
nn, carea = 0.,0.
for _xs,_ys, *slurp in df:
nn += 1.
carea += np.trapz(_ys,_xs) / 512**6
area = carea / nn
# take averaged area deviation
nn, carea = 0.,0.
for _xs,_ys, *slurp in df:
nn += 1.
carea += (area - np.trapz(_ys,_xs) / 512**6)**2
darea = 0.1 * area + np.sqrt(carea / nn)
## --------------------------------------------------------------------- #
# get small scale area
# set analysis domain
_xs_ = np.linspace(64,256,512)
exactAs = []
# take averaged area
nn, carea = 0.,0.
for _xs,_ys, *slurp in df:
_ys_ = np.interp(_xs_,_xs,_ys)
nn += 1.
carea += np.trapz(_ys_,_xs_) / 512**6
exactAs.append(slurp)
areaSmall = carea / nn
exactAs = np.array(exactAs).T
# take averaged area deviation
nn, carea = 0.,0.
for _xs,_ys, *slurp in df:
_ys_ = np.interp(_xs_,_xs,_ys)
nn += 1.
carea += (areaSmall - np.trapz(_ys_,_xs_) / 512**6)**2
dareaSmall = 0.1 * areaSmall + np.sqrt(carea / nn)
## --------------------------------------------------------------------- #
# get original data
_xs,_ys, *slurp = df[0]
# set analysis domain
xs = np.linspace(np.log10(_xs[0]),np.log10(_xs[-1]),1024)
# take averaged codomain
nys,cys = 0.,0.
for _xs,_ys, *slurp in df:
nys += 1.
cys += np.interp(xs,np.log10(_xs),np.log10(_ys))
ys = cys / nys
# take averaged deviation
nys,cys = 0.,0.
for _xs,_ys, *slurp in df:
nys += 1.
cys += (ys - np.interp(xs,np.log10(_xs),np.log10(_ys)))**2
dys = 1.4 * np.sqrt(cys / nys)
Xs.append(xs); Ys.append(ys)
#ys,xs = ulz.moving_avg_1d(ys,xs,7)
## --------------------------------------------------------------------- #
if fit:
__xs = np.linspace(0.6,1.2,100)
__ys = np.interp(__xs,xs,ys)
#slope, ofs = np.polyfit(__xs, __ys,1)
slope, ofs, r_value, p_value, stderr = scipy.stats.linregress(__xs, __ys)
__ys = np.interp(__xs,xs,ys-dys)
#slopeL, ofsL = np.polyfit(__xs, __ys,1)
slopeL, ofsL, r_valueL, p_valueL, stderrL = scipy.stats.linregress(__xs, __ys)
__ys = np.interp(__xs,xs,ys+dys)
slopeU, ofsU = np.polyfit(__xs, __ys,1)
slopeU, ofsU, r_valueU, p_valueU, stderrU = scipy.stats.linregress(__xs, __ys)
dslope = np.abs(slopeU - slopeL) + stderr + stderrL + stderrU
dofs = np.abs(ofsU - ofsL) + stderr + stderrL + stderrU
#print(run.label, "\t", *['%.4f' % x for x in [slope, ofs, area/512**6]])
#print(run.label, "\t", *['%.4f' % x for x in [slope, ofs, dslope, dofs]])
#print(run.id, dyntime,
# uc.ufloat(slope, dslope),
# uc.ufloat(ofs, dofs),
# uc.ufloat(area, darea),
# sep="\t")
#print(exactAs)
#sys.exit()
foo = sum([[np.mean(x[0]), np.std(x[0])] for x in exactAs],[])
#print(run.id, dyntime, slope, dslope, ofs, dofs, area, darea, areaSmall, dareaSmall, sep="\t")
print(run.id, *foo, (foo[4] + foo[6])/2., (foo[5] + foo[7])/2., sep="\t")
line_xs = np.linspace(*plt.gca().get_xlim(),10)
axB.plot(line_xs, slope*line_xs + ofs, ls=':', lw=1, color=run.color, label='_nolegend_')
## --------------------------------------------------------------------- #
axB.fill_between(xs, ys-dys, ys+dys, facecolor='grey', alpha=0.5)
axB.plot(xs,ys,label=run.label,lw=1.5,ls=run.line,color=run.color)
## ------------------------------------------------------------------------- #
## Plot average of all curves
if False:
Xs = np.mean(np.array(Xs),axis=0)
Ys = np.mean(np.array(Ys),axis=0)
plt.plot(Xs,Ys,':',label='average', color='black')
## ------------------------------------------------------------------------- #
if False and fit:
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
f = gauss
coeff, var_matrix = scipy.optimize.curve_fit(f, Xs, Ys, p0=[1., 0., 1.])
xs = np.linspace(*xlim,100)
ys = f(xs, *coeff)
#label = 'gaussian fit: A=%6.3f, mu=%6.3f, sigma=%6.3f' % tuple(coeff)
label = 'gaussian fit'
plt.plot(xs,np.log10(ys),'--',label=label, color='black')
f = skew_pdf
#coeff = [2,1,0.7,-20]
coeff = [*coeff,1.0]
coeff, var_matrix = scipy.optimize.curve_fit(f, Xs, Ys, p0=coeff)
xs = np.linspace(*xlim,100)
ys = f(xs, *coeff)
#label = 'pdf-skew fit: A=%6.3f, mu=%6.3f, sigma=%6.3f, alpha=%6.3f' % tuple(coeff)
label = 'pdf-skew'
plt.plot(xs,np.log10(ys),'-',label=label, color='black')
xticks = np.arange(-0.6,3.0,0.2)
if False and fit:
_xticks = np.linspace(0.6,1.4,10)
axB.plot(_xticks, -1.1 * _xticks + 17.5, ls=':', lw=2, color='black', label='-1.1 • log10(k) + 17.5')
#axB.plot(_xticks, -19/9 * _xticks + 17.8, ls=':', lw=2, color='black', label='-19/9 • log10(k) + 17.8')
#axB.plot(_xticks, -5/3 * _xticks + 17, ls=':', lw=2, color='black', label='-5/3 • log10(k) + 17.8')
## top x-axis
axT = axB.twiny()
axT.set_xticks(xticks)
axT.set_xticklabels(['%.2f' % (10**x) for x in xticks])
axT.set_xlabel(xlabelTop)
axT.set_xlim(xticks[0],xticks[-1])
## bottom x-axis
axB.set_xlabel(xlabelBot)
axB.set_xlim(xticks[0],xticks[-1])
axB.set_xticks(xticks)
axB.grid()
axB.legend(ncol=1)
plt.tight_layout()
if output in 'show':
plt.show()
elif output in 'none':
pass
else:
plt.savefig(str(output), format='png')
```
#### File: sandbox/plot/diss-over-mach.py
```python
import sys
import pickle
import numpy as np
from matplotlib import pylab as plt
import matplotlib
matplotlib.rc('font', family='DejaVu Sans')
matplotlib.rcParams.update({'font.size': 20})
def moving_sum(xs, n=2):
return np.array([np.sum(x) for x in xs[:xs.shape[0]//n * n].reshape(-1,n)])
def mymean(xs):
avg = np.mean(xs)
return np.mean([x for x in xs if np.abs((avg-x)/avg) <= 0.5])
def moving_avg(xs, n=2):
return np.array([mymean(x) for x in xs[:xs.shape[0]//n * n].reshape(-1,n)])
import cycler
ccycle = cycler.cycler('color', ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00'])
plt.rc('axes', prop_cycle=ccycle)
# [01]time
# [02]dt
# [03]EkinLastStp
# [04]EkinBefFor [05]EkinAftFor
# [06]dEkin(diss) [07] ([06]/dt)
# [08]dEkin(forc) [09] ([08]/dt)
# [10]Vrms_vw_BF [11]Vrms_vw_AF [12]Vrms_mw_BF [13]Vrms_mw_AF
# [14]Eint [15]dEint [16]dEint/dt [17]Emag [18]dEmag
# [19]dEmag/dt [20]Epot [21]dEpot [22]dEpot/dt
# [23]dErad [24]dErad/dt [25]E_bulk_BF [26]dE_bulk_corr [27] ([23]/dt)
# [28]E_bulk_AF [29]dE_bulk_AF [30] ([24]/dt)
MACH = 10
clen = 1
ttc = clen / MACH
fps = sys.argv[1::2] # files paths
lgs = sys.argv[2::2] # legends labels
# dissipation rate
for fp,lg in zip(fps,lgs):
data = np.load(fp)
data = data.T
mach = data[12]
dKdt = -data[6]
temp = np.array([[m,d] for m,d in zip(mach,dKdt) if m <= 10]).T
temp = np.array(sorted(temp.T, key=lambda x: x[0])).T
xs = moving_avg(temp[0], 5)
ys = moving_avg(temp[1], 5)
plt.plot(xs, ys, '-', lw=3, label=lg)
plt.title("Turbulent Box (Mach = %d): Dissipation Rate over Mach" % MACH)
plt.xlabel('sonic mach number')
plt.ylabel('dissipation rate: -ΔK/Δt')
plt.xlim(2,10)
plt.ylim(0,1550)
plt.grid()
plt.legend(loc='upper left')
plt.show()
# fig = matplotlib.pyplot.gcf()
# fig.set_size_inches(18.5, 10.5)
# plt.tight_layout()
# plt.savefig(sys.stdout.buffer,format='png', bbox_inches='tight')
```
#### File: plot/StirTurb_CGM/minmax.py
```python
import os, sys, pickle
import numpy as np
from pathlib import Path
def put(msg):
print(msg, file=sys.stderr, flush=True)
## ========================================================================= ##
## process commandline arguments
import argparse
pp = argparse.ArgumentParser(description = 'Batch Min Max Analysis Cache Files')
pp.add_argument(
'--snkfp',
help='path to store minmax.pickle',
type=Path, required=True,
)
pp.add_argument(
'--skip',
type=bool,
default=False,
help='skip already produced files',
)
pp.add_argument(
'--pickles',
help='list of snapshot files',
type=Path,nargs='*', required=True
)
ARGV = pp.parse_args()
key = 'pdf_vw'
subkey = 'dens'
dd = {
key: {subkey: {'min': [999,999], 'max': [0,0]}},
}
for pid, srcfp in enumerate(ARGV.pickles):
with open(str(srcfp), 'rb') as fh: data = pickle.load(fh)
xs = data[key][subkey][1]
ys = data[key][subkey][0]
dd[key][subkey]['min'][1] = min(dd[key][subkey]['min'][1], np.min(xs))
dd[key][subkey]['max'][1] = max(dd[key][subkey]['max'][1], np.max(xs))
dd[key][subkey]['min'][0] = min(dd[key][subkey]['min'][0], np.min(ys))
dd[key][subkey]['max'][0] = max(dd[key][subkey]['max'][0], np.max(ys))
with open(str(ARGV.snkfp), 'wb') as fh: pickle.dump(dd, fh)
```
#### File: tools/lib/flash.py
```python
import sys
import ulz
import numpy as np
import h5 as h5
import interpolate as itpl
class File(h5.File):
def __init__(self, fpath, mode='r', **kwargs):
super().__init__(fpath, mode)
self.framework = 'flash'
# transform meta information to python data types
self.siminfo = self.get('sim info')
self.refine_levels = self.get('refine level')[()]
self.maxrefinelevel = np.max(self.refine_levels)
self.is_multilevel = any(level > 1 for level in self.refine_levels)
self.realscalars = h5.dataset_to_dict(self.get('real scalars'))
self.realruntime = h5.dataset_to_dict(self.get('real runtime parameters'))
self.integerscalars = h5.dataset_to_dict(self.get('integer scalars'))
self.integerruntime = h5.dataset_to_dict(self.get('integer runtime parameters'))
for key in 'nblockx nblocky nblockz'.split():
if not key in self.integerruntime:
self.integerruntime[key] = 1
if self.integerscalars['nzb'] == 1:
self.ndims = 2
else:
self.ndims = 3
self.coords = self.get('coordinates')[()]
self.gridsize = self.calc_gridsize(self.maxrefinelevel)
self.grid = np.array([[0,0,0], self.gridsize-1])
self.domain = np.array([
[self.realruntime[x] for x in 'xmin ymin zmin'.split()],
[self.realruntime[x] for x in 'xmax ymax zmax'.split()]
])
self.domainsize = np.abs(self.domain[1]-self.domain[0])
self.domsize = self.domainsize
self.blocksize = np.array([self.integerscalars[x] for x in 'nxb nyb nzb'.split()])
self.cellsize = self.domainsize / self.gridsize
self.cellvolume = np.prod(self.cellsize)
# shortcut to general parameters useful in analysis
self.params = dict()
self.params['time'] = self.realscalars['time']
self.params['dt'] = self.realscalars['dt']
self.params['gamma'] = self.realruntime['gamma']
self.params['kappa'] = self.realruntime['gamma'] # synonym
self.gamma = self.params['gamma']
self.time = self.params['time']
if self.ndims == 2:
self.extent = tuple(self.realruntime[k] for k in 'xmin xmax ymin ymax'.split())
else:
self.extent = tuple(self.realruntime[k] for k in 'xmin xmax ymin ymax zmin zmax'.split())
def __getattr__(self, name):
if name in self.realscalars:
return self.realscalars[name]
if name in self.realruntime:
return self.realruntime[name]
if name in self.integerscalars:
return self.integerscalars[name]
if name in self.integerruntime:
return self.integerruntime[name]
        raise AttributeError('Unknown attribute: {}'.format(name))
def data(self,dname):
return self.get_data(dname)
def calc_gridsize(self,rlevel):
gridsize = np.array([self.integerruntime[N] * self.integerscalars[n]*2**(rlevel-1)
for N,n in zip('nblockx nblocky nblockz'.split(), 'nxb nyb nzb'.split())]).astype(np.int)
if self.ndims == 2:
gridsize = np.array((gridsize[0],gridsize[1],1))
if not self.is_multilevel: # handle uniform grid
gridsize *= np.array([self.integerscalars[key] for key in 'iprocs jprocs kprocs'.split()])
return gridsize
def get_data(self,dname,shape=None,method='nearest'):
levels = self.get('refine level')[()]
coords = self.get('coordinates')[()]
bsizes = self.get('block size')[()]
ntype = self.get('node type')[()]
domsize = self.domainsize
if self.ndims == 2:
domsize[2] = 1.0
coords -= self.domain[0]
coords /= domsize
bsizes /= domsize
if shape == None:
shape = 2**(self.maxrefinelevel-1) * self.blocksize
if self.ndims == 2:
image = np.zeros(shape[0:2])
if isinstance(dname,str):
blocks = self.get(dname)[()].astype(np.float64)
blocks = np.transpose(blocks,(0,3,2,1))
else:
blocks = dname
itpl.cells_to_image_2d(ntype,coords,bsizes,blocks,image,method=method)
elif self.ndims == 3:
image = np.zeros(shape)
if isinstance(dname,str):
blocks = self.get(dname)[()].astype(np.float64)
blocks = np.transpose(blocks,(0,3,2,1))
else:
blocks = dname
itpl.cells_to_image_3d(ntype,coords,bsizes,blocks,image,method=method)
else:
raise RuntimeError('Unknown dimension.')
return image
def as_box(self,dname):
return self.get_data(dname)
def get_prims(self,**kwargs):
return [self.get_data(dname, **kwargs) for dname in 'dens velx vely velz pres'.split()]
def get_cons(self,gamma=5./3.):
return ulz.navier_primitive_to_conservative(self.get_prims(), gamma)
# experimental!
def set_data(self,dname,box):
if self.is_multilevel:
raise NotImplementedError('Setting data for multilevel grids (AMR) is not supported yet!')
if box.shape != tuple(self.gridsize):
raise ValueError('Given box shape does not match gridsize!')
# auxiliary variables for code clarity: shape: (3,)
gridsize = self.gridsize
domsize = self.domainsize
offset = -(self.domain[0])
blksize = self.blocksize
linspace = ulz.mk_body_centered_linspace
X,Y,Z = np.meshgrid(*tuple(linspace(-1, 1, nb) for nb in blksize))
coords = self.get('coordinates') # shape: (#bids,3)
blocks = self.h5file.get(dname) # shape: (#bids,nxb*nyb*nzb)
# note: using numpy broadcasting kung-fu: shape: coords.shape
positions = np.round((coords+offset)/domsize * gridsize).astype(np.int)
for bid, pos in enumerate(positions):
I = np.array((pos-blksize//2,pos+blksize//2)).transpose()
blocks[bid] = box[[slice(*i) for i in I]].transpose((2,1,0))
#blocks.file.flush()
def list_datasets(self):
return self.h5file.keys()
def to_hdf(self,fpath): # WIP!
outfile = h5.File(fpath,'w')
outfile.create_dataset("DIMS", data=self.meta['grid size'])
outfile.create_dataset("CELL VOL", data=self.meta['cell size'])
outfile.create_dataset("BOX VOL", data=self.meta['domain size'])
outfile.create_dataset("MIN BOUNDS", data=self.meta['min bounds'])
outfile.create_dataset("MAX BOUNDS", data=self.meta['max bounds'])
time = self.meta['real scalars']['time']
step = float(self.meta['integer scalars']['nstep'])
dt = 0.0
outfile.create_dataset("SIM_INFO", data=np.array([time,step,dt]))
outfile.create_dataset("dens", data=self.get_box('dens').reshape(-1))
outfile.create_dataset("velx", data=self.get_box('velx').reshape(-1))
outfile.create_dataset("vely", data=self.get_box('vely').reshape(-1))
outfile.create_dataset("velz", data=self.get_box('velz').reshape(-1))
outfile.close()
if __name__ == '__main__':
fp = sys.argv[1]
fh = File(fp)
# dens = fh.get_data('dens')
# print(dens.shape)
if 0:
levels = fh.get('refine level')[()]
coords = fh.get('coordinates')[()]
bsizes = fh.get('block size')[()]
ntype = fh.get('node type')[()]
domsize = fh.domainsize
coords -= fh.domain[0]
coords /= domsize
bsizes /= domsize
p = np.array([0.0,0.0,0.1])
u = np.array([1.0,0.0,0.0])
v = np.array([0.0,1.0,0.0])
nedges = itpl.plane_morton_to_coords(ntype,coords,bsizes, p,u,v, edges=None)
edges = np.zeros([nedges,2,3])
nedges = itpl.plane_morton_to_coords(ntype,coords,bsizes, p,u,v, edges=edges)
print(nedges)
print(edges)
levels = fh.get('refine level')[()]
coords = fh.get('coordinates')[()]
bsizes = fh.get('block size')[()]
ntype = fh.get('node type')[()]
coords -= fh.domain[0]
coords /= fh.domsize
bsizes /= fh.domsize
method = 'nearest'
image = np.zeros(3*(128,))
cells = fh.get('minloc')[()].astype(np.float64)
# print(cells.shape)
# cells = fh.get('minloc')[()].astype(np.float64)
# cells = np.transpose(fh.get('dens'),(0,3,2,1))
# print(cells.shape)
itpl.cells_to_image_3d(ntype,coords,bsizes,cells,image,method=method)
print(np.max(cells))
print(np.max(image))
```
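A minimal sketch of reading a FLASH file with the `File` class above; the file path is a placeholder and the snippet assumes the file provides a 'dens' dataset:
```python
import numpy as np
import flash
fh = flash.File('checkpoint_0001.h5', 'r')    # placeholder path to a FLASH plot/checkpoint file
dens = fh.data('dens')                        # density interpolated onto a uniform box
print('time       :', fh.params['time'])
print('grid size  :', fh.gridsize)
print('total mass :', np.sum(dens) * fh.cellvolume)
```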
#### File: tools/lib/ulz.py
```python
import itertools
import hashlib
import numpy as np
import os
def str2bool(str):
if str.lower() in 't true yes on'.split():
return True
if str.lower() in 'f false no off'.split():
return False
raise ValueError("%s cannot be converted to boolean" % str)
def coerce(str):
for f in [int,float,str2bool]:
try:
return f(str)
except:
pass
return str
def gen_key(unit,grid,solv,deli='/'):
return '%s%s%d%s%s' % (unit,deli,grid,deli,solv)
def gen_amr_path(key):
return '/srv/data/FLASH/stirturb/mach-2.0/%s' % key
def gen_pickle_path(key):
cachedir = '/tmp'
#uniqueid = hashlib.md5(key.encode()).hexdigest()
uniqueid = key.replace('/','.')
return '%s/time-evolution.%s.pandas.pickle' % (cachedir,uniqueid)
def gen_hdf5_path(key):
cachedir = '/tmp'
uniqueid = key.replace('/','.')
return '%s/time-evolution.%s.pandas.h5' % (cachedir,uniqueid)
units = 'unit cgs'.split()
grids = [16,24,32,48,64]
solvs = '8w b3 b5 es'.split()
def load_datafiles(units,grids,solvs):
keys = [gen_key(*key) for key in itertools.product(units,grids,solvs)]
dfs = [pd.read_hdf(gen_hdf5_path(key),'table') for key in keys]
return dict(zip(keys,dfs))
def turntime(key):
if 'unit' in key:
return 1/2.0
elif 'cgs' in key:
return 4.385e+14
def sort_unstructured_grid(ugrid, udata, doReshape=True):
ndim = ugrid.shape[-1]
tdat = ugrid.dtype
# structured/sorted grid + sorting indices
grid,idx = np.unique(ugrid.view(tdat.descr * ndim), return_index=True)
grid = grid.view(tdat).reshape(-1,ndim)
data = udata.ravel()[idx]
if doReshape:
# try to reshape sorted data to ndimensional block of n cells
n = int(np.round(np.power(grid.shape[0],1/ndim)))
grid = grid.reshape([n]*ndim + [ndim])
data = data.reshape([n]*ndim)
return grid, data
def mk_cartesian_product_2d(xs,ys):
return np.transpose(np.meshgrid(xs,ys)).reshape(-1,2)
def mk_cartesian_product_3d(xs,ys,zs):
return np.roll(np.transpose(np.meshgrid(xs,ys,zs)).reshape(-1,3),1,axis=1)
def mk_body_centered_linspace(infimum, supremum, nNodes, withBoundaryNodes=False):
"""
Make regular body centered linear space w/ or w/o neighboring boundary nodes.
"""
domsize = np.abs(supremum - infimum)
offset = domsize / nNodes / 2
if withBoundaryNodes:
nNodes = nNodes + 2
infimum = infimum - offset
supremum = supremum + offset
else:
infimum = infimum + offset
supremum = supremum - offset
return np.linspace(infimum, supremum, nNodes, endpoint=True)
def mk_body_centered_to_face_centered(xs):
return np.concatenate([(xs-0.5*(np.roll(xs,-1)-xs))[:-1],(xs+0.5*(xs-np.roll(xs,1)))[len(xs)-2:]])
def wrap_in_guard_cells(stone):
"""
Wrap a cubiod block of data with 'guard cells' which
represents the opposite side - periodic boundaries.
Parameters
----------
stone : ndarray
Returns
-------
ndarray of shape (stone[0]+2, stone[1]+2, ...)
"""
plum = np.zeros(np.array(stone.shape)+2)
# fill stone of plum
plum[1:-1,1:-1,1:-1] = stone
# wrap up stone with pulp
plum[ 0,1:-1,1:-1] = stone[-1,:,:]
plum[-1,1:-1,1:-1] = stone[ 0,:,:]
plum[1:-1, 0,1:-1] = stone[:,-1,:]
plum[1:-1,-1,1:-1] = stone[:, 0,:]
plum[1:-1,1:-1, 0] = stone[:,:,-1]
plum[1:-1,1:-1,-1] = stone[:,:, 0]
# don't forget the edges
plum[:, 0, 0] = plum[:, 0, -2]
plum[:,-1, 0] = plum[:,-1, -2]
plum[:, 0,-1] = plum[:, 0, 1]
plum[:,-1,-1] = plum[:,-1, 1]
plum[ 0,:, 0] = plum[ 0, :,-2]
plum[-1,:, 0] = plum[-1, :,-2]
plum[ 0,:,-1] = plum[ 0, :, 1]
plum[-1,:,-1] = plum[-1, :, 1]
plum[ 0, 0,:] = plum[ 0,-2, :]
plum[-1, 0,:] = plum[-1,-2, :]
plum[ 0,-1,:] = plum[ 0, 1, :]
plum[-1,-1,:] = plum[-1, 1, :]
return plum
def transform_to_ref_space(left, right, nodes):
return left + (right-left) * (nodes+1)/2
def diff(xs,ys,step=1):
xsnw = xs + (np.roll(xs,-step) - xs)/(step+1)
dydx = (np.roll(ys,-step) - ys) / (np.roll(xs,-step) - xs)
return (xsnw[:-step], dydx[:-step])
def diff_x(f,Delta=1):
return (np.roll(f,-1,axis=0) - np.roll(f,1,axis=0))/2./Delta
def diff_y(f,Delta=1):
return (np.roll(f,-1,axis=1) - np.roll(f,1,axis=1))/2./Delta
def diff_z(f,Delta=1):
return (np.roll(f,-1,axis=2) - np.roll(f,1,axis=2))/2./Delta
def curl(X,Y,Z,Dx,Dy,Dz):
dX = (diff_y(Z,Dy) - diff_z(Y,Dz))
dY = (diff_z(X,Dz) - diff_x(Z,Dx))
dZ = (diff_x(Y,Dx) - diff_y(X,Dy))
return (dX,dY,dZ)
def S(velx, vely, velz, Delta=1):
vels = [velx,vely,velz]
difs = [diff_x, diff_y, diff_z]
acc = 0
for i in range(len(vels)):
for j in range(len(difs)):
acc += difs[j](vels[i]) + difs[i](vels[j])
return 1/2 * acc
def Q(velx, vely, velz, Delta=1):
vels = [velx,vely,velz]
difs = [diff_x, diff_y, diff_z]
acc = 0
for i in range(len(vels)):
for j in range(len(difs)):
acc += difs[j](vels[i]) * difs[i](vels[j])
return -1/2 * acc
def R(velx, vely, velz, Delta=1):
vels = [velx,vely,velz]
difs = [diff_x, diff_y, diff_z]
acc = 0
for i in range(3):
for j in range(3):
for k in range(3):
acc += difs[j](vels[i]) * difs[k](vels[j]) * difs[i](vels[k])
return -1/3 * acc
def norm(X,Y,Z):
return X**2 + Y**2 + Z**2
def find_file(fname, paths):
for path in paths:
for root, dirs, files in os.walk(path):
if fname in files:
return os.path.join(root, fname)
raise FileNotFoundError("Cannot find '%s' in any of %s." % (fname, paths))
def navier_primitive_to_conservative_2d(prims, kappa=5/3):
cons = [None]*len(prims)
cons[0] = prims[0] # density
cons[1] = prims[0]*prims[1] # momentum x
cons[2] = prims[0]*prims[2] # momentum y
cons[3] = prims[3]/(kappa-1) \
+ prims[0]/2*(prims[1]**2+prims[2]**2) # total energy
return cons
def navier_conservative_to_primitive_2d(cons, kappa=5/3):
prims = [None]*len(cons)
prims[0] = cons[0] # density
prims[1] = cons[1] / cons[0] # velx
prims[2] = cons[2] / cons[0] # vely
prims[3] = (kappa-1)*(cons[3] \
- prims[0]/2*(prims[1]**2+prims[2]**2)) # pressure
return prims
def navier_primitive_to_conservative(prims, kappa=5/3):
cons = [None]*len(prims)
cons[0] = prims[0] # density
cons[1] = prims[0]*prims[1] # momentum x
cons[2] = prims[0]*prims[2] # momentum y
cons[3] = prims[0]*prims[3] # momentum z
cons[4] = prims[4]/(kappa-1) \
+ prims[0]/2*(prims[1]**2+prims[2]**2+prims[3]**2) # total energy
return cons
def navier_conservative_to_primitive(cons, kappa=5/3):
prims = [None]*len(cons)
prims[0] = cons[0] # density
prims[1] = cons[1] / cons[0] # velx
prims[2] = cons[2] / cons[0] # vely
prims[3] = cons[3] / cons[0] # velz
prims[4] = (kappa-1)*(cons[4] \
- prims[0]/2*(prims[1]**2+prims[2]**2+prims[3]**2)) # pressure
return prims
def mhd_primitive_to_conservative(prims, kappa=5/3, mu0=1.0):
cons = [None]*len(prims)
cons[0] = prims[0] # density
cons[1] = prims[0]*prims[1] # momentum x
cons[2] = prims[0]*prims[2] # momentum y
cons[3] = prims[0]*prims[3] # momentum z
cons[4] = prims[4]/(kappa-1) + prims[0]/2*(prims[1]**2+prims[2]**2+prims[3]**2) \
+ (prims[5]**2+prims[6]**2+prims[7]**2)/2/mu0 # total energy
cons[5] = prims[5] # mag x
cons[6] = prims[6] # mag y
cons[7] = prims[7] # mag z
return cons
def mhd_conservative_to_primitive(cons, kappa=5/3, mu0=1.0):
prims = [None]*len(cons)
prims[0] = cons[0] # density
prims[1] = cons[1] / cons[0] # velx
prims[2] = cons[2] / cons[0] # vely
prims[3] = cons[3] / cons[0] # velz
prims[4] = (kappa-1)*(cons[4] - cons[0]/2*(prims[1]**2+prims[2]**2+prims[3]**2) \
- (cons[5]**2+cons[6]**2+cons[7]**2)/2/mu0) # pressure
prims[5] = cons[5] # mag x
prims[6] = cons[6] # mag y
prims[7] = cons[7] # mag z
return prims
def bins2xs(edges):
return edges[:-1] + (edges[1]-edges[0])/2
def mkincr(start=0,step=1):
pos = start
while True:
yield pos
pos += step
def moving_avg_1d(ys, xs=None, N=3):
m = len(ys)
ret_ys = ys[:m - (m % N)].reshape(m//N,N)
if xs is not None:
if m != len(xs): raise ValueError("'ys' and 'xs' must be of equal length!")
ret_xs = xs[:m - (m % N)].reshape(m//N,N)
return np.mean(ret_ys, axis=1), np.mean(ret_xs, axis=1)
return np.mean(ret_ys, axis=1)
def despike(ys,xs=None,diff=0.01,blocksize=6,mask=False):
dlen = len(ys)
tail = dlen % blocksize
retv = np.full(dlen, True, dtype=bool)
if tail > 0:
tmp = ys[:dlen-tail].reshape((-1,blocksize))
retv[:dlen-tail] = np.ravel(np.abs(tmp.T - np.mean(tmp,axis=1)).T) < diff
tmp = ys[dlen-tail:dlen]
retv[dlen-tail:dlen] = np.abs(tmp - np.mean(tmp)) < diff
else:
tmp = ys.reshape((-1,blocksize))
retv = np.ravel(np.abs(tmp.T - np.mean(tmp,axis=1)).T) < diff
if xs is not None:
if dlen != len(xs): raise ValueError("'ys' and 'xs' must be of equal length!")
return ys[retv], xs[retv]
else:
if mask is True:
return retv
else:
return ys[retv]
def zoom_array(arr,factor=2):
retv = arr
for i in range(len(retv.shape)): retv = retv.repeat(factor,axis=i)
return retv
## ========================================================================= ##
## caching and testing routines
def cache(srcfp, cachefp, task, *args):
import pickle as pk
import pathlib as pl
srcfp = pl.Path(srcfp)
cachefp = pl.Path(cachefp)
if cachefp.exists() and cachefp.stat().st_mtime > srcfp.stat().st_mtime:
with cachefp.open(mode='rb') as fh:
result = pk.load(fh)
else:
result = task(*args)
with cachefp.open(mode='wb') as fh:
pk.dump(result, fh)
return result
def flatten_dict(d, delimiter='.'):
def expand(key, value):
if isinstance(value, dict):
return [
(delimiter.join([key, k]), v)
for k, v in flatten_dict(value, delimiter).items()
]
else:
return [(key, value)]
return dict(
[item for k, v in d.items() for item in expand(k, v)]
)
## ========================================================================= ##
## command line utilities
def URLhandler(url):
if url.lower().startswith('file://'):
with open(url[len('file://'):]) as fd: return fd.read()
return url
def PositiveInt(arg):
x = int(arg)
if x >= 0:
return x
else:
raise ValueError("'%d' must be positive!" % x)
```
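A short sketch of two of the grid helpers above, showing only shapes and the periodic wrapping with made-up data:
```python
import numpy as np
import ulz
# body-centered nodes of 4 cells on [0, 1): 0.125, 0.375, 0.625, 0.875
xs = ulz.mk_body_centered_linspace(0.0, 1.0, 4)
print(xs)
# wrap a 2x2x2 block with one layer of periodic guard cells -> shape (4, 4, 4)
stone = np.arange(8.0).reshape(2, 2, 2)
plum = ulz.wrap_in_guard_cells(stone)
print(plum.shape)
print(plum[0, 1, 1] == stone[-1, 0, 0])  # guard layer copies the opposite face -> True
```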
#### File: tools/lib/vectoranalysis3D.py
```python
import numpy as np
def diff_x(f,Delta=1):
return (np.roll(f,-1,axis=0) - np.roll(f,1,axis=0))/2./Delta
def diff_y(f,Delta=1):
return (np.roll(f,-1,axis=1) - np.roll(f,1,axis=1))/2./Delta
def diff_z(f,Delta=1):
return (np.roll(f,-1,axis=2) - np.roll(f,1,axis=2))/2./Delta
def curl(X,Y,Z,Dx,Dy,Dz):
dX = (diff_y(Z,Dy) - diff_z(Y,Dz))
dY = (diff_z(X,Dz) - diff_x(Z,Dx))
dZ = (diff_x(Y,Dx) - diff_y(X,Dy))
return (dX,dY,dZ)
def norm(X,Y,Z):
return X**2 + Y**2 + Z**2
```
#### File: tools/test/cells_to_plane_3d.py
```python
import sys
import numpy as np
import titanic
import interpolate as itpl
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'font.family': 'Monospace', 'font.size': 8})
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import ulz
def vnorm(v):
return np.sqrt(np.sum(v*v))
fp = sys.argv[1]
fh = titanic.File(fp)
levels = fh.get('/mesh/levels')
morton = fh.get('/mesh/morton')
cells = np.transpose(fh.get('/data/states/dens'),(0,3,2,1))
#p = np.array([0.5,0.0,0.0],dtype=np.double)
#u = np.array([0.0,1.0,0.0],dtype=np.double)
#v = np.array([0.0,0.0,1.0],dtype=np.double)
#p = np.array([0.0,0.5,0.0],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,0.0,1.0],dtype=np.double)
#p = np.array([0.0,0.0,0.5],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,1.0,0.0],dtype=np.double)
#p = np.array([0.0,0.0,0.0],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,1.0,1.0],dtype=np.double)
#p = np.array([0.0,0.0,0.1],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,1.0,0.1],dtype=np.double)
#p = np.array([0.0,0.0,0.0],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,1.0,0.5],dtype=np.double)
#p = np.array([0.0,0.0,0.0],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0,1.0,0.1],dtype=np.double)
#S = 1./np.sqrt(2)
#Q = 0.5*(1-S)
#p = np.array([0.0, Q , Q ],dtype=np.double)
#u = np.array([1.0,0.0,0.0],dtype=np.double)
#v = np.array([0.0, S , S ],dtype=np.double)
#p = np.array([0.0, 0.0, 0.0],dtype=np.double)
#u = np.array([1.0, 0.0, 0.0],dtype=np.double)
#v = np.array([0.0, 1.0, 1.0],dtype=np.double)
#p = np.array([0.0, 0.0, 0.0],dtype=np.double)
#u = np.array([1.0, 0.0, 0.5],dtype=np.double)
#v = np.array([0.0, 1.0, 0.5],dtype=np.double)
def interpol_3d(p,u,v,cells,shape=2*(2*128,)):
image = np.zeros(shape,dtype=np.double)
itpl.cells_to_plane_3d(levels,morton,cells,image,p,u,v,method='linear')
return image
def get_grid3d(p,u,v):
edgecount = itpl.plane_morton_to_coords(levels,morton,p,u,v)
edges = np.zeros((edgecount,2,3))
edgecount = itpl.plane_morton_to_coords(levels,morton,p,u,v,edges)
return edges
dpi = 150
#extent = [0,1,0,1]
#extent = [0,1,0,1.01]
#extent = [-0.5,1.5,-0.5,1.5]
#extent = [0,np.linalg.norm(u),0,np.linalg.norm(v)]
#segments = edges[:,:,1:3]
#segments = edges[:,:,0:3:2]
#segments = edges[:,:,0:2]
#plt.gca().add_collection(matplotlib.collections.LineCollection(segments,color='white',linewidths=0.5,linestyles='solid'))
# plt.imshow(
# data,
# extent=extent,
# vmin = 0.0, vmax = 3.0,
# cmap='cubehelix',
# #cmap='viridis',
# interpolation=None,
# origin='lower left'
# )
# plt.colorbar()
def mk_facecolors(data,cmap='cubehelix',cmin=None,cmax=None):
if cmin is None: cmin = np.min(data)
if cmax is None: cmax = np.max(data)
norm = matplotlib.colors.Normalize(cmin, cmax)
m = plt.cm.ScalarMappable(norm=norm,cmap=cmap)
m.set_array([])
return m.to_rgba(data)
fcmin = 0.0
fcmax = 3.0
fig = plt.figure(figsize=(720/dpi, 720/dpi), dpi=dpi)
ax = fig.gca(projection='3d')
#ax.view_init(elev=10.0,azim=-90.0)
#ax.view_init(elev=10.0,azim=-45.0)
ax.view_init(elev=10.0,azim=-120.0)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.set_zlim(0.0, 1.0)
# -------------------------------------------------------------------------- #
if 1:
p = np.array([ 0.0, 0.0, 0.0],dtype=np.double)
u = np.array([ 1.0, 0.0, 0.5],dtype=np.double)
v = np.array([ 0.0, 1.0, 0.5],dtype=np.double)
data = interpol_3d(p,u,v,cells)
edges = get_grid3d(p,u,v)
s = ulz.mk_body_centered_linspace(0,1,data.shape[0])
t = ulz.mk_body_centered_linspace(0,1,data.shape[1])
ss,tt = np.meshgrid(s,t,indexing='ij')
# create vertices for a rotated mesh (3D rotation matrix)
X = p[0] + ss*u[0] + tt*v[0]
Y = p[1] + ss*u[1] + tt*v[1]
Z = p[2] + ss*u[2] + tt*v[2]
fc = mk_facecolors(data,cmin=fcmin,cmax=fcmax)
ax.plot_surface(X,Y,Z, rstride=4, cstride=4, facecolors=fc, vmin=fcmin, vmax=fcmax, shade=False)
ax.add_collection(Line3DCollection(edges,color='black',linewidths=0.05,linestyles='solid'))
# -------------------------------------------------------------------------- #
if 0:
p = np.array([ 0.0, 0.0, 0.0],dtype=np.double)
u = np.array([ 0.0, 0.0, 1.0],dtype=np.double)
v = np.array([ 1.0, 1.0, 0.0],dtype=np.double)
data = interpol_3d(p,u,v,cells)
edges = get_grid3d(p,u,v)
s = ulz.mk_body_centered_linspace(0,1,data.shape[0])
t = ulz.mk_body_centered_linspace(0,1,data.shape[1])
ss,tt = np.meshgrid(s,t,indexing='ij')
# create vertices for a rotated mesh (3D rotation matrix)
X = p[0] + ss*u[0] + tt*v[0]
Y = p[1] + ss*u[1] + tt*v[1]
Z = p[2] + ss*u[2] + tt*v[2]
fc = mk_facecolors(data,cmin=fcmin,cmax=fcmax)
ax.plot_surface(X,Y,Z, rstride=4, cstride=4, facecolors=fc, vmin=fcmin, vmax=fcmax, shade=False)
ax.add_collection(Line3DCollection(edges,color='black',linewidths=0.02,linestyles='solid'))
# -------------------------------------------------------------------------- #
if 0:
p = np.array([ 0.5, 0.5, 0.0],dtype=np.double)
u = np.array([ 0.0, 0.0, 1.0],dtype=np.double)
v = np.array([ 0.5, -0.5, 0.0],dtype=np.double)
data = interpol_3d(p,u,v,cells)
edges = get_grid3d(p,u,v)
s = ulz.mk_body_centered_linspace(0,1,data.shape[0])
t = ulz.mk_body_centered_linspace(0,1,data.shape[1])
ss,tt = np.meshgrid(s,t,indexing='ij')
# create vertices for a rotated mesh (3D rotation matrix)
X = p[0] + ss*u[0] + tt*v[0]
Y = p[1] + ss*u[1] + tt*v[1]
Z = p[2] + ss*u[2] + tt*v[2]
fc = mk_facecolors(data,cmin=fcmin,cmax=fcmax)
ax.plot_surface(X,Y,Z, rstride=4, cstride=4, facecolors=fc, vmin=fcmin, vmax=fcmax, shade=False)
ax.add_collection(Line3DCollection(edges,color='black',linewidths=0.1,linestyles='solid'))
# -------------------------------------------------------------------------- #
if 0:
p = np.array([ 0.5, 0.5, 0.0],dtype=np.double)
u = np.array([ 0.0, 0.0, 1.0],dtype=np.double)
v = np.array([ 0.5, -0.5, 0.0],dtype=np.double)
data = interpol_3d(p,u,v,cells)
edges = get_grid3d(p,u,v)
s = ulz.mk_body_centered_linspace(0,1,data.shape[0])
t = ulz.mk_body_centered_linspace(0,1,data.shape[1])
ss,tt = np.meshgrid(s,t,indexing='ij')
# create vertices for a rotated mesh (3D rotation matrix)
X = p[0] + ss*u[0] + tt*v[0]
Y = p[1] + ss*u[1] + tt*v[1]
Z = p[2] + ss*u[2] + tt*v[2]
fc = mk_facecolors(data,cmin=fcmin,cmax=fcmax)
ax.plot_surface(X,Y,Z, rstride=4, cstride=4, facecolors=fc, vmin=fcmin, vmax=fcmax, shade=False)
ax.add_collection(Line3DCollection(edges,color='black',linewidths=0.1,linestyles='solid'))
# -------------------------------------------------------------------------- #
plt.title('density blob: cake cut')
fig.tight_layout()
plt.savefig('test.png', bbox_inches='tight', dpi=dpi)
plt.close()
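# ---------------------------------------------------------------------------- #
# Hedged sketch (added commentary, not part of the original script): each cut
# plane above is the map r(s,t) = p + s*u + t*v over s,t in [0,1]; the X, Y, Z
# vertex arrays are that map evaluated on a body-centered (s,t) grid.
_s, _t = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 3), indexing='ij')
_p = np.zeros(3)
_u = np.array([1.0, 0.0, 0.5])
_v = np.array([0.0, 1.0, 0.5])
_plane = _p[None, None, :] + _s[..., None] * _u + _t[..., None] * _v  # shape (3, 3, 3)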
``` |
{
"source": "jm-armijo/design-patterns",
"score": 3
} |
#### File: template-method/python3/compiler.py
```python
class Compiler:
def build(self, code):
print("Source code: {}".format(code))
code = self.preprocess(code)
code = self.compile(code)
code = self.assemble(code)
code = self.link(code)
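# Hedged sketch (added, not part of the original example): a concrete subclass
# supplies the steps that the template method `build` orchestrates.
class ToyCompiler(Compiler):
    def preprocess(self, code):
        print("Preprocessing")
        return code
    def compile(self, code):
        print("Compiling")
        return code
    def assemble(self, code):
        print("Assembling")
        return code
    def link(self, code):
        print("Linking")
        return code
ToyCompiler().build("int main() { return 0; }")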
``` |
{
"source": "jmaroeder/python-singletons",
"score": 2
} |
#### File: src/singletons/shared_module.py
```python
import logging
import types
from typing import Any, Callable, MutableMapping, Optional, Type, Union
from unittest.mock import Mock
from singletons.factory import ( # noqa: WPS436
EventletFactory,
GeventFactory,
GlobalFactory,
GreenthreadFactory,
ProcessFactory,
ThreadFactory,
_FactoryBase,
)
from singletons.singleton import (
EventletSingleton,
GeventSingleton,
GreenthreadSingleton,
ProcessSingleton,
Singleton,
ThreadSingleton,
)
from singletons.utils import env_to_bool
SETUP_MOCK = "SINGLETONS_SETUP_MOCK"
LOG = logging.getLogger(__name__)
METACLASS_FACTORY_MAP = types.MappingProxyType(
{
Singleton: GlobalFactory,
ProcessSingleton: ProcessFactory,
ThreadSingleton: ThreadFactory,
GreenthreadSingleton: GreenthreadFactory,
EventletSingleton: EventletFactory,
GeventSingleton: GeventFactory,
},
)
class SharedModule(types.ModuleType):
"""
Base class used to intercept attribute accesses to a module.
See https://mail.python.org/pipermail/python-ideas/2012-May/014969.html where Guido talks
about this technique.
This allows for lazy loading and overriding in the case of ``setup_mock``.
Subclasses must set ``globals`` class attribute.
Example usage (at the very bottom of a module to be made into a shared module)::
class _Shared(SharedModule):
globals = globals()
sys.modules[__name__] = _Shared()
"""
_mock: Optional[MutableMapping] = None
def __init__(self) -> None:
if not hasattr(self, "globals"): # noqa: WPS421
raise NotImplementedError(
"SharedModule subclasses must define the `globals` attribute "
+ "(see documentation for example)",
) # pragma: no cover
if env_to_bool(SETUP_MOCK):
self.setup_mock() # pragma: no cover
def setup_mock(self) -> None:
"""
Switch the module to ``mock`` mode, or reset all existing Mocks.
All attribute accesses will receive mock objects instead of actual ones.
"""
self._mock = {}
def teardown_mock(self) -> None:
"""
Switch the module out of ``mock`` mode.
Remove all existing Mocks.
"""
self._mock = None
def __getattr__(self, key: str) -> Any:
if self._mock is None:
try:
return self.globals[key]
except KeyError:
raise AttributeError(
f"module '{self.globals['__name__']}' has no attribute '{key}'",
)
if key not in self._mock:
self._mock[key] = self._instantiate_mock_instance(key)
return self._mock[key]
def __setattr__(self, key: str, attr_value: Any) -> None:
if key == "_mock":
super().__setattr__(key, attr_value)
if self._mock is None:
self.globals[key] = attr_value
return
self._mock[key] = attr_value
def _instantiate_mock_instance(self, key: str) -> Union[Callable, Mock]:
"""
Select the appropriately scoped mock instance, or a simple Mock.
:raises: AttributeError
:arg key: The item name
:return: The factory or Mock
"""
try:
original = self.globals[key]
except KeyError:
raise AttributeError(f"module '{self.globals['__name__']}' has no attribute '{key}'")
metaclass = getattr(original, "singleton_metaclass", None) or type(original)
factory = self._select_factory(metaclass)
if factory is None:
return Mock()
LOG.debug("Using factory %s for %s", factory, key)
# create a factory with the appropriate scope
@factory # type: ignore
def _mock_factory() -> Mock: # noqa: WPS430
return Mock()
return _mock_factory
def _select_factory(self, metaclass: Type) -> Optional[Type[_FactoryBase]]:
"""
Select the appropriate factory to use based on the metaclass.
:arg metaclass: One of the Singleton metaclasses
:return: The appropriate Factory class, or None
"""
for key, factory in METACLASS_FACTORY_MAP.items():
if issubclass(metaclass, key):
return factory
return None
```
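A brief usage sketch of the mock switch described above; the module name `my_config` and the attribute `database_url` are illustrative assumptions, not names from this package.
```python
# my_config.py -- a shared module built as the SharedModule docstring shows
import sys
from singletons.shared_module import SharedModule
database_url = "sqlite:///example.db"  # ordinary module-level attribute
class _Shared(SharedModule):
    globals = globals()
sys.modules[__name__] = _Shared()
```
Consumers can then flip the imported module into mock mode and back:
```python
import my_config
assert my_config.database_url == "sqlite:///example.db"
my_config.setup_mock()      # attribute accesses now resolve to unittest.mock.Mock objects
print(my_config.database_url)
my_config.teardown_mock()   # back to the real module globals
```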
#### File: src/singletons/utils.py
```python
import contextlib
import os
import sys
import warnings
from singletons.exceptions import NoGreenthreadEnvironmentWarning
BOOLEAN_TRUE_STRINGS = frozenset(("true", "on", "ok", "y", "yes", "1"))
_greenthread_environment = None # noqa: WPS121, WPS122
def _detect_greenthread_environment() -> str:
"""
Detect if eventlet or gevent are in use.
:return: 'eventlet', 'gevent', or 'default' (neither environment detected)
"""
if "eventlet" in sys.modules:
with contextlib.suppress(ImportError):
from eventlet.patcher import is_monkey_patched # noqa: WPS433
import socket # noqa: WPS433
if is_monkey_patched(socket):
return "eventlet"
if "gevent" in sys.modules:
with contextlib.suppress(ImportError):
from gevent import socket as gsocket # noqa: WPS433
import socket # noqa: WPS433, WPS440
if socket.socket is gsocket.socket: # type: ignore
return "gevent"
return "default"
def detect_greenthread_environment() -> str:
"""
Detect the current greenthread environment.
:return: 'eventlet', 'gevent', or 'default' (neither environment detected)
"""
global _greenthread_environment # noqa: WPS420
if _greenthread_environment is None:
_greenthread_environment = _detect_greenthread_environment() # noqa: WPS122, WPS442
return _greenthread_environment # noqa: WPS121
def greenthread_ident() -> int:
"""
Get the identifier of the current greenthread environment.
:return: get_ident() or 0 if no greenthread environment is detected.
"""
greenthread_environment = detect_greenthread_environment()
if greenthread_environment == "eventlet":
import eventlet.corolocal # noqa: WPS433
return eventlet.corolocal.get_ident()
if greenthread_environment == "gevent":
import gevent.thread # noqa: WPS433
return gevent.thread.get_ident()
warnings.warn(
"No greenthread environment detected - falling back to global scope",
NoGreenthreadEnvironmentWarning,
)
return 0
def env_to_bool(key: str) -> bool:
"""
Parse an environment variable and coerce it to a boolean value.
:param key: the environment variable to use
:return: the coerced bool value
"""
str_value = os.environ.get(key, "")
return str_value.strip().lower() in BOOLEAN_TRUE_STRINGS
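# Hedged usage sketch (added commentary, not part of the library); "MY_FLAG" is
# an illustrative variable name. Matching is case-insensitive against
# BOOLEAN_TRUE_STRINGS, and any other value coerces to False.
if __name__ == "__main__":
    os.environ["MY_FLAG"] = "Yes"
    print(env_to_bool("MY_FLAG"))  # True
    os.environ["MY_FLAG"] = "off"
    print(env_to_bool("MY_FLAG"))  # False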
``` |
{
"source": "jmaronas/pytorch_datasets",
"score": 3
} |
#### File: jmaronas/pytorch_datasets/common.py
```python
from torchvision.datasets.utils import * #check_integrity
from torchvision.datasets.folder import * #ImageFolder
import os
import gzip
import tarfile
import zipfile
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz") or filename.endswith(".tgz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None, md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
```
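A short usage sketch of these helpers, assuming this module is importable as `common`; the URL and paths are purely illustrative and must point at real files for the calls to succeed.
```python
from common import download_and_extract_archive, extract_archive
# fetch an archive (hypothetical URL) into ./downloads and unpack it into ./data
download_and_extract_archive(
    "https://example.org/data/sample.tar.gz",
    download_root="./downloads",
    extract_root="./data",
    remove_finished=True,
)
# or unpack something already on disk; the format is chosen by file extension
extract_archive("./downloads/sample.zip", to_path="./data")
```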
#### File: jmaronas/pytorch_datasets/tiny_imagenet.py
```python
import torch
import torch.utils.data as data
from scipy.misc import imresize,imsave
from PIL import Image
import os
from common import *
#TODO: return bounding boxes, allow for other types of interpolations
class tiny_ImageNet(ImageFolder):
"""`tiny ImageNet <https://tiny-imagenet.herokuapp.com/>`_ Dataset.
Args:
directory (string): Root directory of the dataset where the file
``tiny-imagenet-200.zip`` exists or will be saved to if download is set to True.
partition (string): Select partition: train/valid/test. If test is selected then
a unique label 0 is returned.
image_shape (int,optional): Number specifying the shape of the final image, default is 64.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(self,directory, partition, image_shape=64,transform=None, target_transform=None,download=True):
self.md5sum='90528d7ca1a48142e341f4ef8d21d0de'
self.url='http://cs231n.stanford.edu/tiny-imagenet-200.zip'
self.directory=directory
self.filename='tiny-imagenet-200'
self.partition=partition
self.image_shape=image_shape
self.interpolation='bilinear'
self.transform=transform
self.target_transform=target_transform
if download:
self._download()
#check everything is correctly downloaded
if not self._check_integrity():
raise Exception("Files corrupted. Set download=True")
#one can decide to reshape images after download, instead of doing it online, which would increase computation
if not self._check_if_process():
self._process()
split_folder=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images')
super(tiny_ImageNet, self).__init__(split_folder,transform=transform,target_transform=target_transform)
def _get_classes(self):
path=os.path.join(self.directory,self.filename,'wnids.txt')
return [line.split('\n')[0] for line in open(path,'r')]
def _create_directories(self,class_list):
for _ in class_list:
path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images',_)
os.makedirs(path)
def _process_train(self,class_list):
print("Processing Train Images")
for c in class_list:
path=os.path.join(self.directory,self.filename,'train',c,'images')
for f in os.listdir(path):
fpath=os.path.join(self.directory,self.filename,'train',c,'images',f)
image = Image.open(fpath)
if self.image_shape!=64:
new_im=imresize(image,(self.image_shape,self.image_shape),self.interpolation)
else:
new_im=image
save_path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images',c,f)
imsave(save_path+'.JPEG',new_im)
def _process_valid(self):
print("Processing Validation Images")
val_file = os.path.join(self.directory,self.filename,'val','val_annotations.txt')
image_dir = os.path.join(self.directory,self.filename,'val','images')
for line in open(val_file,'r'):
image_name,image_class,_,_,_,_=line.split()
fpath=os.path.join(image_dir,image_name)
image = Image.open(fpath)
if self.image_shape!=64:
new_im=imresize(image,(self.image_shape,self.image_shape),self.interpolation)
else:
new_im=image
save_path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images',image_class,image_name)
imsave(save_path+'.JPEG',new_im)
def _process_test(self):
path=os.path.join(self.directory,self.filename,'test','images')
print("Processing Test Images")
for f in os.listdir(path):
fpath=os.path.join(self.directory,self.filename,'test','images',f)
image = Image.open(fpath)
if self.image_shape!=64:
new_im=imresize(image,(self.image_shape,self.image_shape),self.interpolation)
else:
new_im=image
save_path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images','0',f)
imsave(save_path+'.JPEG',new_im)
def _process(self):
path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images')
os.makedirs(path)
class_lists=self._get_classes()
if self.partition in ['train','valid']:
self._create_directories(class_lists)
else:
path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'images','0')
os.makedirs(path)
if self.partition=='train':
self._process_train(class_lists)
elif self.partition=='valid':
self._process_valid()
elif self.partition=='test':
self._process_test()
else:
raise ValueError('Invalid {} partition name. Choose from train, valid or test'.format(self.partition))
path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'.correctly_processed')
open(path,'w').close()
def _check_if_process(self):
#first check if the path exits
path=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition)
if not os.path.exists(path):
return False
#check if the process correctly finished
path_file=os.path.join(self.directory,self.filename,'processed',str(self.image_shape),self.partition,'.correctly_processed')
if not os.path.isfile(path_file):
raise Exception("You seem to already have a folder named {} where processing did not succed. Erase folder and re-run".format(path))
return True
def _check_integrity(self):
fpath=os.path.join(self.directory,self.filename+'.zip')
if not check_integrity(fpath, self.md5sum):
return False
return True
def _download(self):
if self._check_integrity():
print('Files already downloaded and verified')
return
download_and_extract_archive(self.url, self.directory, filename=self.filename+'.zip', md5=self.md5sum)
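# ---------------------------------------------------------------------------
# Hedged usage sketch (added commentary, not part of the original module); the
# paths are illustrative, and the first run triggers a one-time download plus a
# resize pass into the 'processed' folder.
if __name__ == '__main__':
    from torchvision import transforms
    dataset = tiny_ImageNet('./data', 'train', image_shape=64,
                            transform=transforms.ToTensor(), download=True)
    loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)  # e.g. torch.Size([32, 3, 64, 64]) torch.Size([32])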
``` |
{
"source": "jmarquisbq/SIMPL",
"score": 2
} |
#### File: Python/Testing/Create_Vertex_Geometry.py
```python
import simpl
import simplpy as d3d
import simpl_helpers as sh
import simpl_test_dirs as sd
def CreateVertexGeometryTest():
# Create Data Container Array
dca = simpl.DataContainerArray()
# Create the Data Container
err = d3d.create_data_container(dca, 'DataContainer')
assert err == 0, f'DataContainer ErrorCondition: {err}'
# Import ASCII Data - #1 - Vertex Coordinates
importFile = sd.GetBuildDirectory() + '/Data/SIMPL/VertexCoordinates.csv'
wizardData = {
'inputFilePath': importFile,
'beginIndex': 2,
'numberOfLines': 145,
'delimiters': [','],
'consecutiveDelimiters': False,
'automaticAM': True,
'selectedPath': simpl.DataArrayPath('DataContainer', 'Bounds', ''),
'headers': ['x', 'y', 'z'],
'attrMatType': 3,
'tupleDimensions': [144],
'dataTypes': ['float', 'float', 'float']
}
err = d3d.read_ascii_data(dca, wizardData)
assert err == 0, f'Import ASCII Data #1 - ErrorCondition: {err}'
# Combine Attribute Arrays # 1:
selectedDataArrayPaths = [simpl.DataArrayPath('DataContainer', 'Bounds', 'x'),
simpl.DataArrayPath('DataContainer', 'Bounds', 'y'),
simpl.DataArrayPath('DataContainer', 'Bounds', 'z')]
err = d3d.combine_attribute_arrays(dca, selectedDataArrayPaths, 'Vertices', False)
assert err == 0, f'Combined Attribute Arrays #1 - ErrorCondition: {err}'
# Delete Data # 1
dcap = simpl.DataContainerArrayProxy()
dcap.getDataContainerProxy('DataContainer').Flag = 0
dcap.getDataContainerProxy('DataContainer').getAttributeMatrixProxy('Bounds').Flag = 0
dcap.getDataContainerProxy('DataContainer').getAttributeMatrixProxy('Bounds').getDataArrayProxy('x').Flag = 2
dcap.getDataContainerProxy('DataContainer').getAttributeMatrixProxy('Bounds').getDataArrayProxy('y').Flag = 2
dcap.getDataContainerProxy('DataContainer').getAttributeMatrixProxy('Bounds').getDataArrayProxy('z').Flag = 2
err = d3d.remove_arrays(dca, dcap)
assert err == 0, f'Remove Arrays #1 - ErrorCondition: {err}'
# Create Geometry
data_container_name = 'DataContainer'
shared_vertex_list_array_path = simpl.DataArrayPath('DataContainer', 'Bounds', 'Vertices')
vertex_attribute_matrix_name = 'VertexData'
err = sh.CreateGeometry(dca, 0, simpl.IGeometry.Type.Vertex, 'DataContainer', False,
shared_vertex_list_array_path = simpl.DataArrayPath('DataContainer', 'Bounds', 'Vertices'),
vertex_attribute_matrix_name = 'VertexData')
assert err == 0, f'Create Geometry - ErrorCondition: {err}'
err = d3d.data_container_writer(dca, sd.GetTestTempDirectory() + '/CreateVertexGeometry.dream3d', True, False)
assert err == 0, f'DataContainerWriter ErrorCondition: {err}'
if __name__ == '__main__':
CreateVertexGeometryTest()
``` |
{
"source": "jmarrama/hhvm",
"score": 2
} |
#### File: test/integration/common_tests.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
import shutil
import subprocess
import sys
import tempfile
from hh_paths import hh_server, hh_client
from utils import write_files
class CommonSaveStateTests(object):
@classmethod
def setUpClass(cls):
cls.maxDiff = 2000
# we create the state in a different dir from the one we run our tests
# on, to verify that the saved state does not depend on any absolute
# paths
init_dir = tempfile.mkdtemp()
cls.repo_dir = tempfile.mkdtemp()
cls.config_path = os.path.join(cls.repo_dir, '.hhconfig')
cls.tmp_dir = tempfile.mkdtemp()
cls.hh_tmp_dir = tempfile.mkdtemp()
cls.saved_state_name = 'foo'
cls.test_env = dict(os.environ, **{
'HH_TEST_MODE': '1',
'HH_TMPDIR': cls.hh_tmp_dir,
'PATH': '%s:/bin:/usr/bin' % cls.tmp_dir,
})
with open(os.path.join(init_dir, '.hhconfig'), 'w') as f:
f.write(r"""
# some comment
assume_php = false""")
cls.files = {}
cls.files['foo_1.php'] = """
<?hh
function f() {
return g() + 1;
}
"""
cls.files['foo_2.php'] = """
<?hh
function g(): int {
return 0;
}
"""
cls.files['foo_3.php'] = """
<?hh
function h(): string {
return "a";
}
class Foo {}
function some_long_function_name() {
new Foo();
h();
}
"""
write_files(cls.files, init_dir)
write_files(cls.files, cls.repo_dir)
cls.save_command(init_dir)
shutil.rmtree(init_dir)
@classmethod
def save_command(cls, init_dir):
raise NotImplementedError()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.repo_dir)
shutil.rmtree(cls.tmp_dir)
shutil.rmtree(cls.hh_tmp_dir)
@classmethod
def saved_state_path(cls):
return os.path.join(cls.tmp_dir, cls.saved_state_name)
def write_load_config(self, *changed_files):
raise NotImplementedError()
@classmethod
def start_hh_server(cls):
cmd = [hh_server, cls.repo_dir]
print(" ".join(cmd), file=sys.stderr)
return subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
env=cls.test_env)
@classmethod
def get_server_logs(cls):
log_file = cls.proc_call([
hh_client, '--logname', cls.repo_dir]).strip()
with open(log_file) as f:
return f.read()
def setUp(self):
write_files(self.files, self.repo_dir)
def tearDown(self):
self.proc_call([
hh_client,
'stop',
self.repo_dir
])
for p in glob.glob(os.path.join(self.repo_dir, '*')):
os.remove(p)
@classmethod
def proc_call(cls, args, env=None, stdin=None):
"""
Invoke a subprocess, return stdout, send stderr to our stderr (for
debugging)
"""
env = {} if env is None else env
print(" ".join(args), file=sys.stderr)
proc = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=dict(cls.test_env, **env),
universal_newlines=True)
(stdout_data, stderr_data) = proc.communicate(stdin)
sys.stderr.write(stderr_data)
sys.stderr.flush()
return stdout_data
def check_cmd(self, expected_output, stdin=None, options=None):
raise NotImplementedError()
def test_modify_file(self):
"""
Add an error to a file that previously had none.
"""
with open(os.path.join(self.repo_dir, 'foo_2.php'), 'w') as f:
f.write("""
<?hh
function g(): int {
return 'a';
}
""")
self.write_load_config('foo_2.php')
self.check_cmd([
'{root}foo_2.php:4:24,26: Invalid return type (Typing[4110])',
' {root}foo_2.php:3:27,29: This is an int',
' {root}foo_2.php:4:24,26: It is incompatible with a string',
])
def test_new_file(self):
"""
Add a new file that contains an error.
"""
with open(os.path.join(self.repo_dir, 'foo_4.php'), 'w') as f:
f.write("""
<?hh
function k(): int {
return 'a';
}
""")
self.write_load_config('foo_4.php')
self.check_cmd([
'{root}foo_4.php:4:24,26: Invalid return type (Typing[4110])',
' {root}foo_4.php:3:27,29: This is an int',
' {root}foo_4.php:4:24,26: It is incompatible with a string',
])
def test_deleted_file(self):
"""
Delete a file that still has dangling references after restoring from
a saved state.
"""
os.remove(os.path.join(self.repo_dir, 'foo_2.php'))
self.write_load_config('foo_2.php')
self.check_cmd([
'{root}foo_1.php:4:20,20: Unbound name: g (a global function) (Naming[2049])',
'{root}foo_1.php:4:20,20: Unbound name: g (a global constant) (Naming[2049])',
])
def test_duplicated_file(self):
self.write_load_config('foo_2.php')
self.check_cmd(['No errors!'])
shutil.copyfile(
os.path.join(self.repo_dir, 'foo_2.php'),
os.path.join(self.repo_dir, 'foo_2_dup.php'))
self.check_cmd([
'{root}foo_2_dup.php:3:18,18: Name already bound: g (Naming[2012])',
' {root}foo_2.php:3:18,18: Previous definition is here'])
os.remove(os.path.join(self.repo_dir, 'foo_2.php'))
self.check_cmd(['No errors!'])
def test_moved_file(self):
"""
Move a file, then create an error that references a definition in it.
Check that the new file name is displayed in the error.
"""
self.write_load_config(
'foo_1.php', 'foo_2.php', 'bar_2.php',
)
os.rename(
os.path.join(self.repo_dir, 'foo_2.php'),
os.path.join(self.repo_dir, 'bar_2.php'),
)
with open(os.path.join(self.repo_dir, 'foo_1.php'), 'w') as f:
f.write("""
<?hh
function f(): string {
return g();
}
""")
self.check_cmd([
'{root}foo_1.php:4:24,26: Invalid return type (Typing[4110])',
' {root}foo_1.php:3:27,32: This is a string',
' {root}bar_2.php:3:23,25: It is incompatible with an int',
])
def test_ide_tools(self):
"""
Test hh_client --search, --find-refs, --find-class-refs, --type-at-pos,
and --list-files
We *could* break this up into multiple tests, but starting the server
takes time and this test is slow enough already
"""
self.write_load_config()
self.check_cmd([
'File "{root}foo_3.php", line 9, characters 18-40: some_long_function_name, function'
], options=['--search', 'some_lo'])
self.check_cmd([
'File "{root}foo_3.php", line 11, characters 13-13: h',
'1 total results'
], options=['--find-refs', 'h'])
self.check_cmd([
'File "{root}foo_3.php", line 10, characters 13-21: Foo::__construct',
'1 total results'
], options=['--find-refs', 'Foo::__construct'])
self.check_cmd([
'File "{root}foo_3.php", line 10, characters 17-19: Foo::__construct',
'1 total results'
], options=['--find-class-refs', 'Foo'])
self.check_cmd([
'string'
], options=['--type-at-pos', '{root}foo_3.php:11:13'])
self.check_cmd([
# the doubled curly braces are because this string gets passed
# through format()
'[{{"name":"some_long_function_name",'
'"type":"(function(): _)",'
'"pos":{{"filename":"{root}foo_3.php",'
'"line":9,"char_start":18,"char_end":40}},'
'"func_details":{{"min_arity":0,"return_type":"_","params":[]}},'
'"expected_ty":false}}]'
],
# test the --json output because the non-json one doesn't contain
# the filename, and we are especially interested in testing file
# paths
options=['--auto-complete', '--json'],
stdin='<?hh function f() { some_AUTO332\n')
self.check_cmd([
'Foo::bar'
],
options=['--identify-function', '1:51'],
stdin='<?hh class Foo { private function bar() { $this->bar() }}')
os.remove(os.path.join(self.repo_dir, 'foo_2.php'))
self.check_cmd([
'{root}foo_1.php',
], options=['--list-files'])
```
#### File: test/integration/utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import signal
import subprocess
import sys
def touch(fn):
with open(fn, 'a'):
os.utime(fn, None)
def write_files(files, dir_path):
"""
Write a bunch of files into the directory at dir_path.
files: dict of file name => file contents
"""
for fn, content in files.items():
path = os.path.join(dir_path, fn)
with open(path, 'w') as f:
f.write(content)
def ensure_output_contains(f, s, timeout=20):
"""
Looks for a match in a process' output, subject to a timeout in case the
process hangs
"""
lines = []
def handler(signo, frame):
raise AssertionError('Failed to find %s in the following output: %s' %
(s, ''.join(lines)))
try:
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
while True:
line = f.readline().decode('utf-8')
if s in line:
return
lines.append(line)
finally:
signal.alarm(0)
```
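A small sketch of how the timeout helper above is typically used (POSIX-only, since it relies on SIGALRM); the child command here is illustrative.
```python
import subprocess
from utils import ensure_output_contains  # assumption: this module is on the path
proc = subprocess.Popen(['echo', 'server is now ready'], stdout=subprocess.PIPE)
ensure_output_contains(proc.stdout, 'ready', timeout=5)  # raises if not seen in time
proc.wait()
```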
#### File: tools/benchy/any_mean.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from confidence_interval import mean_confidence_interval, arith_mean
import argparse
import math
import re
import sys
MEASUREMENT_REGEX = re.compile(r'(.+):\s*(.+)')
def parse_measurements(in_file):
"""Parses a set of labeled measurements and aggregates like-labeled
measurements.
"""
categories = {}
for line in in_file:
result = MEASUREMENT_REGEX.match(line)
if result is None:
continue
lhs = str(result.group(1))
rhs = str(result.group(2))
try:
rhs = float(rhs)
except ValueError:
continue
if lhs not in categories:
categories[lhs] = []
categories[lhs].append(rhs)
return categories
def find_widest_key(categories):
"""Returns width of widest key for formatting.
"""
widest_key = 0
for key in categories:
if len(key) > widest_key:
widest_key = len(key)
return widest_key
def arithmetic_mean(samples):
"""Computes the arithmetic mean of a set of samples.
"""
return float(sum(samples)) / float(len(samples))
def geometric_mean(samples):
"""Computes the geometric mean of a set of samples.
"""
return math.exp(arithmetic_mean([math.log(x) for x in samples]))
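# e.g. geometric_mean([2.0, 8.0]) = exp((ln 2 + ln 8) / 2) = exp(ln 4) = 4.0,
# while arithmetic_mean([2.0, 8.0]) = 5.0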
# Select stripes across all categories and compute the geomean of these stripes
def compute_striped_geomeans(categories):
"""Pulls a sample from each category into a "stripe" and computes the
geometric mean of the stripe.
"""
geomeans = []
i = 0
while True:
stripe = []
for _, values in categories.items():
if i >= len(values):
continue
stripe.append(values[i])
if len(stripe) == 0:
break
geomeans.append(geometric_mean(stripe))
i += 1
categories['Geomean'] = geomeans
def print_means_and_cis(categories, widest_key):
"""Prints the mean and confidence interval for each category.
"""
for key, values in categories.items():
pad_width = widest_key - len(key)
padding = " " * pad_width
mean, interval = None, None
if len(values) > 1:
mean, interval = mean_confidence_interval(values)
print("%s: %s%.2f +-%.2f" % (key, padding, mean, interval))
else:
mean = arith_mean(values)
print("%s: %s%.2f" % (key, padding, mean))
sys.stderr.write("Warning: too few samples to calculate confidence"
" interval for \"%s\"\n" % key)
def main():
"""Parses arguments and passes them to the main computation functions.
"""
parser = argparse.ArgumentParser(description="Utility script for "
"calculating statistics on labeled data.")
parser.add_argument('--geomean', action='store_const', const=True,
default=False, help='Also outputs the geometric mean '
'of all the other means.')
parser.add_argument('file', nargs='?', type=str)
args = parser.parse_args()
infile = None
if args.file is None:
infile = sys.stdin
else:
infile = open(args.file, 'r')
categories = parse_measurements(infile)
if args.geomean:
compute_striped_geomeans(categories)
widest_key = find_widest_key(categories)
print_means_and_cis(categories, widest_key)
if __name__ == "__main__":
main()
``` |
{
"source": "jmarrec/aqtinstall",
"score": 2
} |
#### File: aqtinstall/aqt/archives.py
```python
import xml.etree.ElementTree as ElementTree
from logging import getLogger
from semantic_version import SimpleSpec, Version
from aqt.exceptions import ArchiveListError, NoPackageFound
from aqt.helper import Settings, getUrl
class TargetConfig:
def __init__(self, version, target, arch, os_name):
self.version = str(version)
self.target = target
self.arch = arch
self.os_name = os_name
class QtPackage:
"""
Hold package information.
"""
def __init__(self, name, archive_url, archive, package_desc, hashurl):
self.name = name
self.url = archive_url
self.archive = archive
self.desc = package_desc
self.hashurl = hashurl
class ListInfo:
"""
Hold list information
"""
def __init__(self, name, display_name, desc, virtual):
self.name = name
self.display_name = display_name
self.desc = desc
self.virtual = virtual
class PackagesList:
"""
Hold packages list information.
"""
def __init__(self, version, os_name, target, base, timeout=(5, 5)):
self.version = Version(version)
self.os_name = os_name
self.target = target
self.archives = []
self.base = base
self.timeout = timeout
self.logger = getLogger("aqt")
self._get_archives()
def _get_archives(self):
# Get packages index
if self.version.major == 6 and self.target == "android":
arch_ext = ["_armv7/", "_x86/", "_x86_64/", "_arm64_v8a/"]
elif self.version in SimpleSpec(">=5.13.0,<6.0") and self.target == "desktop":
arch_ext = ["/", "_wasm/"]
else:
arch_ext = ["/"]
for ext in arch_ext:
archive_path = "{0}{1}{2}/qt{3}_{3}{4}{5}{6}".format(
self.os_name,
"_x86/" if self.os_name == "windows" else "_x64/",
self.target,
self.version.major,
self.version.minor,
self.version.patch,
ext,
)
update_xml_url = "{0}{1}Updates.xml".format(self.base, archive_path)
xml_text = getUrl(update_xml_url, self.timeout, self.logger)
self.update_xml = ElementTree.fromstring(xml_text)
for packageupdate in self.update_xml.iter("PackageUpdate"):
name = packageupdate.find("Name").text
if packageupdate.find("DownloadableArchives").text is not None:
package_desc = packageupdate.findtext("Description")
display_name = packageupdate.findtext("DisplayName")
virtual_str = packageupdate.findtext("Virtual")
if virtual_str == "true":
virtual = True
else:
virtual = False
self.archives.append(
ListInfo(name, display_name, package_desc, virtual)
)
if len(self.archives) == 0:
self.logger.error("Error while parsing package information!")
exit(1)
def get_list(self):
return self.archives
class QtArchives:
"""Download and hold Qt archive packages list.
It access to download.qt.io site and get Update.xml file.
It parse XML file and store metadata into list of QtPackage object.
"""
def __init__(
self,
os_name,
target,
version,
arch,
base,
subarchives=None,
modules=None,
all_extra=False,
timeout=(5, 5),
):
self.version = Version(version)
self.target = target
self.arch = arch
self.os_name = os_name
self.all_extra = all_extra
self.arch_list = [item.get("arch") for item in Settings.qt_combinations]
all_archives = subarchives is None
self.base = base + "/online/qtsdkrepository/"
self.logger = getLogger("aqt.archives")
self.archives = []
self.mod_list = []
if all_extra:
self.all_extra = True
else:
for m in modules if modules is not None else []:
self.mod_list.append(
"qt.qt{0}.{0}{1}{2}.{3}.{4}".format(
self.version.major,
self.version.minor,
self.version.patch,
m,
arch,
)
)
self.mod_list.append(
"qt.{0}{1}{2}.{3}.{4}".format(
self.version.major,
self.version.minor,
self.version.patch,
m,
arch,
)
)
self.timeout = timeout
self._get_archives()
if not all_archives:
self.archives = list(filter(lambda a: a.name in subarchives, self.archives))
def _get_archives(self):
# Get packages index
if self.arch == "wasm_32":
arch_ext = "_wasm"
elif self.arch.startswith("android_") and self.version.major == 6:
arch_ext = "{}".format(self.arch[7:])
else:
arch_ext = ""
archive_path = "{0}{1}{2}/qt{3}_{3}{4}{5}{6}/".format(
self.os_name,
"_x86/" if self.os_name == "windows" else "_x64/",
self.target,
self.version.major,
self.version.minor,
self.version.patch,
arch_ext,
)
update_xml_url = "{0}{1}Updates.xml".format(self.base, archive_path)
archive_url = "{0}{1}".format(self.base, archive_path)
target_packages = []
target_packages.append(
"qt.qt{0}.{0}{1}{2}.{3}".format(
self.version.major,
self.version.minor,
self.version.patch,
self.arch,
)
)
target_packages.append(
"qt.{0}{1}{2}.{3}".format(
self.version.major, self.version.minor, self.version.patch, self.arch
)
)
target_packages.extend(self.mod_list)
self._download_update_xml(update_xml_url)
self._parse_update_xml(archive_url, target_packages)
def _download_update_xml(self, update_xml_url):
"""Hook for unit test."""
self.update_xml_text = getUrl(update_xml_url, self.timeout, self.logger)
def _parse_update_xml(self, archive_url, target_packages):
try:
self.update_xml = ElementTree.fromstring(self.update_xml_text)
except ElementTree.ParseError as perror:
self.logger.error("Downloaded metadata is corrupted. {}".format(perror))
raise ArchiveListError("Downloaded metadata is corrupted.")
else:
for packageupdate in self.update_xml.iter("PackageUpdate"):
name = packageupdate.find("Name").text
# Need to filter archives to download when we want all extra modules
if self.all_extra:
# Check platform
name_last_section = name.split(".")[-1]
if (
name_last_section in self.arch_list
and self.arch != name_last_section
):
continue
# Check doc/examples
if self.arch in ["doc", "examples"]:
if self.arch not in name:
continue
if self.all_extra or name in target_packages:
if packageupdate.find("DownloadableArchives").text is not None:
downloadable_archives = packageupdate.find(
"DownloadableArchives"
).text.split(", ")
full_version = packageupdate.find("Version").text
package_desc = packageupdate.find("Description").text
for archive in downloadable_archives:
archive_name = archive.split("-", maxsplit=1)[0]
package_url = (
archive_url + name + "/" + full_version + archive
)
hashurl = package_url + ".sha1"
self.archives.append(
QtPackage(
archive_name,
package_url,
archive,
package_desc,
hashurl,
)
)
if len(self.archives) == 0:
self.logger.error(
"Specified packages are not found while parsing XML of package information!"
)
raise NoPackageFound
def get_archives(self):
"""
It returns an archive package list.
:return package list
:rtype: List[QtPackage]
"""
return self.archives
def get_target_config(self) -> TargetConfig:
"""Get target configuration
:return: configured target and its version with arch
:rtype: TargetConfig object
"""
return TargetConfig(self.version, self.target, self.arch, self.os_name)
class SrcDocExamplesArchives(QtArchives):
"""Hold doc/src/example archive package list."""
def __init__(
self,
flavor,
os_name,
target,
version,
base,
subarchives=None,
modules=None,
all_extra=False,
timeout=(5, 5),
):
self.flavor = flavor
self.target = target
self.os_name = os_name
self.base = base
self.logger = getLogger("aqt.archives")
super(SrcDocExamplesArchives, self).__init__(
os_name,
target,
version,
self.flavor,
base,
subarchives=subarchives,
modules=modules,
all_extra=all_extra,
timeout=timeout,
)
def _get_archives(self):
archive_path = "{0}{1}{2}/qt{3}_{3}{4}{5}{6}".format(
self.os_name,
"_x86/" if self.os_name == "windows" else "_x64/",
self.target,
self.version.major,
self.version.minor,
self.version.patch,
"_src_doc_examples/",
)
archive_url = "{0}{1}".format(self.base, archive_path)
update_xml_url = "{0}/Updates.xml".format(archive_url)
target_packages = []
target_packages.append(
"qt.qt{0}.{0}{1}{2}.{3}".format(
self.version.major,
self.version.minor,
self.version.patch,
self.flavor,
)
)
target_packages.extend(self.mod_list)
self._download_update_xml(update_xml_url)
self._parse_update_xml(archive_url, target_packages)
def get_target_config(self) -> TargetConfig:
"""Get target configuration.
:return tuple of three parameter, "src_doc_examples", target and arch
"""
return TargetConfig("src_doc_examples", self.target, self.arch, self.os_name)
class ToolArchives(QtArchives):
"""Hold tool archive package list
when installing mingw tool, argument would be
ToolArchive(windows, desktop, 4.9.1-3, mingw)
when installing ifw tool, argument would be
ToolArchive(linux, desktop, 3.1.1, ifw)
"""
def __init__(self, os_name, tool_name, version, arch, base, timeout=(5, 5)):
self.tool_name = tool_name
self.os_name = os_name
self.logger = getLogger("aqt.archives")
super(ToolArchives, self).__init__(
os_name, "desktop", version, arch, base, timeout=timeout
)
def _get_archives(self):
if self.os_name == "windows":
archive_url = self.base + self.os_name + "_x86/" + self.target + "/"
else:
archive_url = self.base + self.os_name + "_x64/" + self.target + "/"
update_xml_url = "{0}{1}/Updates.xml".format(archive_url, self.tool_name)
self._download_update_xml(update_xml_url) # call super method.
self._parse_update_xml(archive_url, [])
def _parse_update_xml(self, archive_url, target_packages):
try:
self.update_xml = ElementTree.fromstring(self.update_xml_text)
except ElementTree.ParseError as perror:
self.logger.error("Downloaded metadata is corrupted. {}".format(perror))
raise ArchiveListError("Downloaded metadata is corrupted.")
else:
for packageupdate in self.update_xml.iter("PackageUpdate"):
name = packageupdate.find("Name").text
if name != self.arch:
continue
_archives = packageupdate.find("DownloadableArchives").text
if _archives is not None:
downloadable_archives = _archives.split(", ")
else:
downloadable_archives = []
named_version = packageupdate.find("Version").text
full_version = Version(named_version)
if full_version.truncate("patch") != self.version.truncate("patch"):
self.logger.warning(
"Base Version of {} is different from requested version {} -- skip.".format(
named_version, self.version
)
)
continue
package_desc = packageupdate.find("Description").text
for archive in downloadable_archives:
package_url = (
archive_url
+ self.tool_name
+ "/"
+ name
+ "/"
+ named_version
+ archive
)
hashurl = package_url + ".sha1"
self.archives.append(
QtPackage(name, package_url, archive, package_desc, hashurl)
)
def get_target_config(self) -> TargetConfig:
"""Get target configuration.
:return tuple of three parameter, "Tools", target and arch
"""
return TargetConfig("Tools", self.target, self.arch, self.os_name)
``` |
{
"source": "jmarrec/Corrie",
"score": 2
} |
#### File: corrie/interface/frame.py
```python
import wx
import json
import os
import sys
import subprocess
from pubsub import pub
from concurrent import futures
from corrie.interface import general_options
from corrie.utility.run_simulation import RunSimulation
from corrie.utility.initialize_data import InitializeData
# wx callbacks need an event argument even though we usually don't use it, so the next line disables that check
# noinspection PyUnusedLocal
class CorrieFrame(wx.Frame):
OutputToolbarIconSize = (16, 15)
current_file_name = 'untitled.corrie'
def __init__(self, *args, **kwargs):
kwargs["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwargs)
pub.subscribe(self.listener_update_statusbar, "listenerUpdateStatusBar")
# Set the title!
self.set_window_title_with_filename(self.current_file_name)
self.SetSize(900, 700)
# set the window exit
self.Bind(wx.EVT_CLOSE, self.handle_frame_close)
self.status_bar = self.CreateStatusBar(1)
self.building_choice = None
self.front_faces_choice = None
self.baseline_code_choice = None
#self.width_field = None
#self.depth_field = None
self.powerpoint_filepick = None
self.excel_filepick = None
self.weather_filepick = None
self.occ_areas_text_controls = []
self.slide_list = None
self.slide_details_box = None
self.value_choice = None
self.select_mode_choice = None
self.incremental_checkbox = None
self.general_options_values = None
self.all_slide_details = self.populate_all_slide_details()
self.general_options_values = self.populate_general_options()
self.build_menu()
self.gui_build()
self.Refresh()
self.run_simulation = RunSimulation()
self.future = None
self.canceled = False
# self.print_standard_paths()
def handle_frame_close(self, event):
self.Destroy()
def gui_build(self):
pnl = wx.Panel(self)
initializer = InitializeData()
self.building_dict = initializer.populate_buildings()
building_label = wx.StaticText(pnl, label='Building')
building_options = list(self.building_dict.keys())
self.building_choice = wx.Choice(pnl, choices=building_options)
self.building_choice.SetSelection(3)
self.Bind(wx.EVT_CHOICE, self.handle_building_choice_select, self.building_choice)
front_faces_label = wx.StaticText(pnl, label='Front Faces')
front_faces_option = ['North', 'North East', 'East', 'South East', 'South', 'South West', 'West', 'North West']
self.front_faces_choice = wx.Choice(pnl, choices=front_faces_option)
self.front_faces_choice.SetSelection(0)
baseline_code_label = wx.StaticText(pnl, label='Baseline Code')
baseline_code_option = ['', ]
self.baseline_code_choice = wx.Choice(pnl, choices=baseline_code_option)
self.baseline_code_choice.SetSelection(0)
building_hbox = wx.BoxSizer(wx.HORIZONTAL)
building_hbox.Add(building_label, 0, wx.ALL, 10)
building_hbox.Add(self.building_choice, 1, wx.ALL | wx.EXPAND, 10)
building_hbox.Add(front_faces_label, 0, wx.ALL, 10)
building_hbox.Add(self.front_faces_choice, 1, wx.ALL | wx.EXPAND, 10)
building_hbox.Add(baseline_code_label, 0, wx.ALL, 10)
building_hbox.Add(self.baseline_code_choice, 1, wx.ALL | wx.EXPAND, 10)
#lot_boundaries_label = wx.StaticText(pnl, label='Lot Boundaries (feet)')
#width_label = wx.StaticText(pnl, label='Width')
#self.width_field = wx.TextCtrl(pnl, value="500")
#depth_label = wx.StaticText(pnl, label='Depth')
#self.depth_field = wx.TextCtrl(pnl, value="500")
#lot_hbox = wx.BoxSizer(wx.HORIZONTAL)
#lot_hbox.Add(lot_boundaries_label, 0, wx.ALL, 10)
#lot_hbox.Add(width_label, 0, wx.ALL, 10)
#lot_hbox.Add(self.width_field, 1, wx.ALL | wx.EXPAND, 10)
#lot_hbox.Add(depth_label, 0, wx.ALL, 10)
#lot_hbox.Add(self.depth_field, 1, wx.ALL | wx.EXPAND, 10)
powerpoint_label = wx.StaticText(pnl, label='PowerPoint File', size=(90, -1))
self.powerpoint_filepick = wx.FilePickerCtrl(pnl, style=wx.FLP_DEFAULT_STYLE | wx.FLP_SMALL, message='Select the PowerPoint file', wildcard='PowerPoint files (*.pptx)|*.pptx')
powerpoint_hbox = wx.BoxSizer(wx.HORIZONTAL)
powerpoint_hbox.Add(powerpoint_label, 0, wx.ALL, 10)
powerpoint_hbox.Add(self.powerpoint_filepick, 1, wx.ALL | wx.EXPAND, 10)
excel_label = wx.StaticText(pnl, label='Excel File', size=(90, -1))
self.excel_filepick = wx.FilePickerCtrl(pnl, style=wx.FLP_DEFAULT_STYLE | wx.FLP_SMALL, message='Select the Excel file', wildcard='Excel files (*.xlsx)|*.xlsx')
excel_hbox = wx.BoxSizer(wx.HORIZONTAL)
excel_hbox.Add(excel_label, 0, wx.ALL, 10)
excel_hbox.Add(self.excel_filepick, 1, wx.ALL | wx.EXPAND, 10)
weather_label = wx.StaticText(pnl, label='Weather File', size=(90, -1))
self.weather_filepick = wx.FilePickerCtrl(pnl, style=wx.FLP_DEFAULT_STYLE | wx.FLP_SMALL, message='Select the EnergyPlus Weather file', wildcard='EnergyPlus Weather files (*.epw)|*.epw')
weather_hbox = wx.BoxSizer(wx.HORIZONTAL)
weather_hbox.Add(weather_label, 0, wx.ALL, 10)
weather_hbox.Add(self.weather_filepick, 1, wx.ALL | wx.EXPAND, 10)
occ_area_label = wx.StaticText(pnl, -1, "Occupancy Areas")
sqft_label = wx.StaticText(pnl, -1, "(square feet)")
occ_area_sizer = wx.FlexGridSizer(cols=2, vgap=5, hgap=5)
occ_area_sizer.AddGrowableCol(0)
occ_area_sizer.AddGrowableCol(1)
occ_area_sizer.Add(occ_area_label, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5)
occ_area_sizer.Add(sqft_label, 0, wx.TOP | wx.BOTTOM, 5)
max_num_occ_areas = 10
for count in range(max_num_occ_areas):
label = wx.StaticText(pnl, -1, 'xxxx', size=(150, -1))
text_control = wx.TextCtrl(pnl, -1, '0', size=(80, -1))
self.occ_areas_text_controls.append((label, text_control))
occ_area_sizer.Add(label, 0, wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5) # wx.ALIGN_RIGHT |
occ_area_sizer.Add(text_control, 0, wx.TOP, 5)
occ_areas = {'Quick Service Restaurant': 30000, 'Full Service Restaurant': 0, 'Retail Standalone': 0, 'junk': 0}
self.update_occ_areas(occ_areas)
#call to initialize the screen
self.handle_building_choice_select(wx.EVT_CHOICE)
slides_label = wx.StaticText(pnl, label='Slides')
slide_list_text = list(self.all_slide_details.keys())
slide_list_order = list(range(len(slide_list_text)))
self.slide_list = wx.RearrangeCtrl(pnl, 1, size=wx.DefaultSize, items=slide_list_text, order=slide_list_order)
slide_list_ctrl = self.slide_list.GetList()
slide_list_ctrl.SetCheckedStrings([slide_list_text[-1],]) #set only the last slide (lighting power density)
self.Bind(wx.EVT_LISTBOX, self.handle_slide_list_ctrl_click, slide_list_ctrl)
slide_list_ctrl.SetSelection(0)
slides_sizer = wx.BoxSizer(wx.VERTICAL)
slides_sizer.Add(slides_label, 0, wx.ALL, 5)
slides_sizer.Add(self.slide_list, 1, wx.ALL | wx.EXPAND, 5)
self.slide_details_box = wx.StaticBox(pnl, -1, "Slide Details for: Aspect Ratio")
top_border, other_border = self.slide_details_box.GetBordersForSizer()
slide_details_sizer = wx.BoxSizer(wx.VERTICAL)
slide_details_sizer.AddSpacer(top_border)
select_mode_hbox = wx.BoxSizer(wx.HORIZONTAL)
select_mode_label = wx.StaticText(self.slide_details_box, label='Selection Mode')
select_mode_options = ['Automatic', 'Exclude Best Option', 'Exclude Two Best Options',
'Exclude Three Best Options', 'Select Option 1', 'Select Option 2', 'Select Option 3',
'Select Option 4', 'Select Option 5', 'Select Option 6', 'Select Option 7',
'Select Option 8']
self.select_mode_choice = wx.Choice(self.slide_details_box, choices=select_mode_options)
self.select_mode_choice.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.handle_select_mode_choice_select, self.select_mode_choice)
select_mode_hbox.Add(select_mode_label, 0, wx.ALL, 5)
select_mode_hbox.Add(self.select_mode_choice, 1, wx.ALL, 5)
slide_details_sizer.Add(select_mode_hbox, 0, wx.ALL, 5)
slide_details_sizer.Hide(select_mode_hbox, recursive=True)
option_simulated_label = wx.StaticText(self.slide_details_box, label="Options Simulated")
slide_details_sizer.Add(option_simulated_label, 0, wx.ALL, 5)
value_options = ['none',]
self.value_choice = wx.CheckListBox(self.slide_details_box, 1, size=wx.DefaultSize, choices=value_options)
self.Bind(wx.EVT_CHECKLISTBOX, self.handle_value_choice_check, self.value_choice)
slide_details_sizer.Add(self.value_choice, 1, wx.ALL | wx.EXPAND, 5)
self.incremental_checkbox = wx.CheckBox(self.slide_details_box, label='Include in Incremental Improvements')
selection_mode, include_incremental, options_list, osw_list = self.all_slide_details[slide_list_ctrl.GetString((slide_list_ctrl.GetSelection()))]
self.set_slide_details(selection_mode, include_incremental, options_list)
self.Bind(wx.EVT_CHECKBOX, self.handle_incremental_checkbox_check, self.incremental_checkbox)
slide_details_sizer.Add(self.incremental_checkbox, 0, wx.ALL, 5)
self.incremental_checkbox.Hide() # hide portions of the GUI that are not yet implemented
self.slide_details_box.SetSizer(slide_details_sizer)
self.run_simulations_button = wx.Button(pnl, 1, "Run Simulations", size=(140, 30))
self.run_simulations_button.Bind(wx.EVT_BUTTON, self.handle_run_simulation_button)
self.cancel_simulations_button = wx.Button(pnl, 1, "Cancel Simulations", size=(140, 30))
self.cancel_simulations_button.Bind(wx.EVT_BUTTON, self.handle_cancel_simulation_button)
self.cancel_simulations_button.Disable()
run_cancel_sizer = wx.BoxSizer(wx.HORIZONTAL)
run_cancel_sizer.Add(self.run_simulations_button, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
run_cancel_sizer.Add(self.cancel_simulations_button, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
bottom_right_sizer = wx.BoxSizer(wx.VERTICAL)
bottom_right_sizer.Add(self.slide_details_box, 1, wx.ALL | wx.ALIGN_TOP |wx.EXPAND, 5)
bottom_right_sizer.Add(run_cancel_sizer, 0, wx.ALL | wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT | wx.EXPAND, 5)
bottom_hbox = wx.BoxSizer(wx.HORIZONTAL)
bottom_hbox.Add(occ_area_sizer, 1, wx.ALL | wx.EXPAND, 5)
bottom_hbox.Add(slides_sizer, 2, wx.ALL | wx.EXPAND, 5)
bottom_hbox.Add(bottom_right_sizer, 2, wx.ALL | wx.EXPAND, 5)
main_vbox = wx.BoxSizer(wx.VERTICAL)
main_vbox.Add(building_hbox, 0, wx.EXPAND | wx.LEFT, border=20)
#main_vbox.Add(lot_hbox, 0, wx.EXPAND | wx.LEFT, border=20)
main_vbox.Add(powerpoint_hbox, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, border=20)
main_vbox.Add(excel_hbox, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, border=20)
main_vbox.Add(weather_hbox, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, border=20)
main_vbox.Add(bottom_hbox, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, border=20)
main_vbox.Add(self.status_bar,0)
main_vbox.Hide(excel_hbox, recursive=True) # hide portions of the GUI that are not yet implemented
pnl.SetSizer(main_vbox)
pnl.Fit()
def update_occ_areas(self, occ_areas):
for (label, text_control) in self.occ_areas_text_controls:
label.SetLabel('')
text_control.Show(False)
for index, (name, area) in enumerate(occ_areas.items()):
label, text_control = self.occ_areas_text_controls[index]
label.SetLabel(name)
text_control.Show(True)
text_control.SetValue(str(area))
def populate_all_slide_details(self):
all_slide_details = {}
#aspect ratio
aspect_ratio_options = []
aspect_ratio_options.append(["width 1.0 : depth 3.0", True, '0.33'])
aspect_ratio_options.append(["width 1.0 : depth 2.5", False, '0.40'])
aspect_ratio_options.append(["width 1.0 : depth 2.0", True, '0.50'])
aspect_ratio_options.append(["width 1.0 : depth 1.5", False, '0.67'])
aspect_ratio_options.append(["width 1.0 : depth 1.0 (square)", True, '1.00'])
aspect_ratio_options.append(["width 1.5 : depth 1.0", False, '1.5'])
aspect_ratio_options.append(["width 2.0 : depth 1.0", True, '2.0'])
aspect_ratio_options.append(["width 2.5 : depth 1.0", False, '2.5'])
aspect_ratio_options.append(["width 3.0 : depth 1.0", True, '3.0'])
all_slide_details['Aspect Ratio'] = ['Automatic',True, aspect_ratio_options, [('CreateBarFromBuildingTypeRatios','ns_to_ew_ratio'),]]
stories_options = []
stories_options.append(["One Floor", True, '1'])
stories_options.append(["Two Floors", True, '2'])
stories_options.append(["Three Floors", True, '3'])
stories_options.append(["Four Floors", False, '4'])
all_slide_details['Number of Stories'] = ['Exclude Best Option',False, stories_options, [('CreateBarFromBuildingTypeRatios','num_stories_above_grade'),]]
orientation_options = []
orientation_options.append(["Entrance Faces North", True, '0'])
orientation_options.append(["Entrance Faces North East", False, '45'])
orientation_options.append(["Entrance Faces East", True, '90'])
orientation_options.append(["Entrance Faces South East", False, '135'])
orientation_options.append(["Entrance Faces South", True, '180'])
orientation_options.append(["Entrance Faces South West", False, '225'])
orientation_options.append(["Entrance Faces West", True, '270'])
orientation_options.append(["Entrance Faces North West", False, '315'])
all_slide_details['Orientation'] = ['Automatic',False, orientation_options, [('CreateBarFromBuildingTypeRatios','building_rotation'),]]
wall_insulation_options = []
wall_insulation_options.append(["R15", False, '15'])
wall_insulation_options.append(["R17", False, '17'])
wall_insulation_options.append(["R19", True, '19'])
wall_insulation_options.append(["R23", True, '23'])
wall_insulation_options.append(["R27", True, '27'])
wall_insulation_options.append(["R31", False, '31'])
wall_insulation_options.append(["R35", False, '35'])
all_slide_details['Wall Insulation'] = ['Automatic', True, wall_insulation_options, [('IncreaseInsulationRValueForExteriorWalls','r_value'),]]
roof_insulation_options = []
roof_insulation_options.append(["R25", False, '25'])
roof_insulation_options.append(["R29", True, '29'])
roof_insulation_options.append(["R33", True, '33'])
roof_insulation_options.append(["R37", True, '37'])
roof_insulation_options.append(["R41", True, '41'])
roof_insulation_options.append(["R45", True, '45'])
roof_insulation_options.append(["R50", False, '50'])
roof_insulation_options.append(["R55", False, '55'])
roof_insulation_options.append(["R60", False, '60'])
all_slide_details['Roof Insulation'] = ['Automatic', True, roof_insulation_options, [('IncreaseInsulationRValueForRoofs','r_value'),]]
wwr_options = []
wwr_options.append(["2%", False, '0.02'])
wwr_options.append(["4%", False, '0.04'])
wwr_options.append(["6%", False, '0.06'])
wwr_options.append(["8%", False, '0.08'])
wwr_options.append(["10%", False, '0.10'])
wwr_options.append(["15%", False, '0.15'])
wwr_options.append(["20%", False, '0.20'])
wwr_options.append(["25%", True, '0.25'])
wwr_options.append(["30%", True, '0.30'])
wwr_options.append(["35%", True, '0.35'])
wwr_options.append(["40%", True, '0.40'])
wwr_options.append(["45%", True, '0.45'])
wwr_options.append(["50%", True, '0.50'])
wwr_options.append(["60%", False, '0.60'])
wwr_options.append(["70%", False, '0.70'])
wwr_options.append(["80%", False, '0.80'])
all_slide_details['Window to wall ratio'] = ['Automatic', True, wwr_options, [('CreateBarFromBuildingTypeRatios','wwr'),]]
# fenestration_options = []
# # from CSBR-UMN (2013) - See MaxTech final report
# fenestration_options.append(["U-Factor=0.99 SHGC=0.72 Tvis=0.74", False])
# fenestration_options.append(["U-Factor=0.55 SHGC=0.61 Tvis=0.64", False])
# fenestration_options.append(["U-Factor=0.55 SHGC=0.45 Tvis=0.39", False])
# fenestration_options.append(["U-Factor=0.53 SHGC=0.18 Tvis=0.08", False])
# fenestration_options.append(["U-Factor=0.39 SHGC=0.27 Tvis=0.43", True])
# fenestration_options.append(["U-Factor=0.39 SHGC=0.23 Tvis=0.30", False])
# fenestration_options.append(["U-Factor=0.39 SHGC=0.35 Tvis=0.57", True])
# fenestration_options.append(["U-Factor=0.38 SHGC=0.26 Tvis=0.52", True])
# fenestration_options.append(["U-Factor=0.22 SHGC=0.28 Tvis=0.49", True])
# fenestration_options.append(["U-Factor=0.21 SHGC=0.19 Tvis=0.28", False])
# fenestration_options.append(["U-Factor=0.97 SHGC=0.44 Tvis=0.50", True])
# fenestration_options.append(["U-Factor=0.55 SHGC=0.48 Tvis=0.44", True])
# all_slide_details['Fenestration Options'] = ['Automatic',True, fenestration_options]
# the overhang measure in OS seems to always fail - need to debug
# overhang_options = []
# overhang_options.append(["Depth is 0.2 x Window Height", True, '0.2'])
# overhang_options.append(["Depth is 0.3 x Window Height", False, '0.3'])
# overhang_options.append(["Depth is 0.4 x Window Height", True, '0.4'])
# overhang_options.append(["Depth is 0.5 x Window Height", False, '0.5'])
# overhang_options.append(["Depth is 0.6 x Window Height", True, '0.6'])
# overhang_options.append(["Depth is 0.7 x Window Height", False, '0.7'])
# overhang_options.append(["Depth is 0.8 x Window Height", True, '0.8'])
# # note also have to have arguments "facade" : "All", "function" : "Add"
# all_slide_details['Window Overhang'] = ['Automatic',True, overhang_options, [('AddRemoveOrReplaceWindowOverhangs','projection_factor'),]]
lighting_options = []
lighting_options.append(["0.40 W/sqft", False, '0.40'])
lighting_options.append(["0.45 W/sqft", False, '0.45'])
lighting_options.append(["0.50 W/sqft", False, '0.50'])
lighting_options.append(["0.55 W/sqft", False, '0.55'])
lighting_options.append(["0.60 W/sqft", True, '0.60'])
lighting_options.append(["0.65 W/sqft", False, '0.65'])
lighting_options.append(["0.70 W/sqft", False, '0.70'])
lighting_options.append(["0.75 W/sqft", False, '0.75'])
lighting_options.append(["0.80 W/sqft", True, '0.80'])
lighting_options.append(["0.85 W/sqft", False, '0.85'])
lighting_options.append(["0.90 W/sqft", False, '0.90'])
lighting_options.append(["0.95 W/sqft", False, '0.95'])
lighting_options.append(["1.00 W/sqft", False, '1.00'])
lighting_options.append(["1.05 W/sqft", False, '1.05'])
all_slide_details['Lighting Power Density'] = ['Automatic',True, lighting_options, [('SetLightingLoadsByLPD','lpd'),]]
return all_slide_details
def handle_slide_list_ctrl_click(self, event):
self.refresh_slide_list_details()
def refresh_slide_list_details(self):
slide_list_ctrl = self.slide_list.GetList()
slide_selected = slide_list_ctrl.GetString(slide_list_ctrl.GetSelection())
self.slide_details_box.SetLabel("Slide Details for: " + slide_selected)
selection_mode, include_incremental, options_list, osw_list = self.all_slide_details[slide_selected]
self.set_slide_details(selection_mode, include_incremental, options_list)
def set_slide_details(self, selection_mode, include_incremental, options_list):
# set options
items_from_list = [x[0] for x in options_list]
selected_options = []
# print('options_list ',options_list)
for option, flag, argument_value in options_list:
if flag:
selected_options.append(option)
self.value_choice.SetItems(items_from_list)
self.value_choice.SetCheckedStrings(selected_options)
# set selection mode
select_mode_selected_index = self.select_mode_choice.FindString(selection_mode)
self.select_mode_choice.SetSelection(select_mode_selected_index)
# set incremental choice
self.incremental_checkbox.SetValue(include_incremental)
def handle_incremental_checkbox_check(self, event):
slide_list_ctrl = self.slide_list.GetList()
slide_selected = slide_list_ctrl.GetString(slide_list_ctrl.GetSelection())
selection_mode, include_incremental, options_list, osw_list = self.all_slide_details[slide_selected]
self.all_slide_details[slide_selected] = [selection_mode, self.incremental_checkbox.GetValue(), options_list, osw_list]
def handle_select_mode_choice_select(self, event):
slide_list_ctrl = self.slide_list.GetList()
slide_selected = slide_list_ctrl.GetString(slide_list_ctrl.GetSelection())
selection_mode, include_incremental, options_list, osw_list = self.all_slide_details[slide_selected]
self.all_slide_details[slide_selected] = [self.select_mode_choice.GetString(self.select_mode_choice.GetSelection()), include_incremental, options_list, osw_list]
def handle_value_choice_check(self, event):
#print(self.value_choice.IsChecked(event.GetInt()))
value_options = []
for index in range(self.value_choice.GetCount()):
#print(self.value_choice.GetString(index),self.value_choice.IsChecked(index))
value_options.append([self.value_choice.GetString(index),self.value_choice.IsChecked(index)])
slide_list_ctrl = self.slide_list.GetList()
slide_selected = slide_list_ctrl.GetString(slide_list_ctrl.GetSelection())
selection_mode, include_incremental, old_options_list, osw_list = self.all_slide_details[slide_selected]
# print('old_options_list', old_options_list)
# print('value_options', value_options)
option_argument_dictionary = {k:v for (k, _, v) in old_options_list}
# print('option_argument_dictionary', option_argument_dictionary)
new_options_list = [[choice, check_status, option_argument_dictionary[choice]] for choice, check_status in value_options ]
# print('new_options_list', new_options_list)
self.all_slide_details[slide_selected] = [selection_mode, include_incremental, new_options_list, osw_list]
def build_menu(self):
menu_bar = wx.MenuBar()
file_menu = wx.Menu()
#menu_file_new = file_menu.Append(101, "&New", "Create a new Corrie file")
menu_file_open = file_menu.Append(102, "&Open...", "Open an existing Corrie file")
self.Bind(wx.EVT_MENU, self.handle_file_open, menu_file_open)
file_menu.AppendSeparator()
menu_file_save = file_menu.Append(103, "&Save\tCtrl-S", "Save file to a same file name")
self.Bind(wx.EVT_MENU, self.handle_file_save, menu_file_save)
menu_file_save_as = file_menu.Append(104, "Save &As...", "Save file to a new file name")
self.Bind(wx.EVT_MENU, self.handle_file_save_as, menu_file_save_as)
file_menu.AppendSeparator()
# menu_file_close = file_menu.Append(105, "&Close", "Close the file")
# file_menu.AppendSeparator()
menu_file_exit = file_menu.Append(106, "E&xit", "Exit the application")
self.Bind(wx.EVT_MENU, self.handle_quit, menu_file_exit)
menu_bar.Append(file_menu, "&File")
option_menu = wx.Menu()
# menu_option_general = option_menu.Append(201, "&General Options...", "General settings for this file.")
# self.Bind(wx.EVT_MENU, self.handle_menu_option_general, menu_option_general)
menu_option_regen_output = option_menu.Append(202, "Regenerate Presentation")
self.Bind(wx.EVT_MENU, self.handle_menu_option_regen_output, menu_option_regen_output)
menu_bar.Append(option_menu, "&Option")
# help_menu = wx.Menu()
# menu_help_topic = help_menu.Append(301, "&Topic...", "Get help.")
# menu_help_about = help_menu.Append(302, "&About...", "About Corrie.")
# menu_bar.Append(help_menu, "&Help")
self.SetMenuBar(menu_bar)
def handle_quit(self, e):
self.Close()
def handle_menu_option_general(self, event):
dialog_general_options = general_options.GeneralOptionsDialog(None)
dialog_general_options.set_parameters(self.general_options_values)
return_value = dialog_general_options.ShowModal()
if return_value == dialog_general_options.CLOSE_SIGNAL_CANCEL:
return
else: #ok pressed
self.general_options_values = dialog_general_options.general_options_dict
dialog_general_options.Destroy()
def populate_general_options(self):
columns_of_values_dict = {'Source Energy Use Intensity':True,
'Site Energy Use Intensity':True,
'Total Source Energy':True,
'Total Site Energy':True,
'Total CO2':True,
'Cooling Energy':True,
'Heating Energy':True,
'Lighting Energy':False,
'Plug Energy':False,
'Total Electricity Usage':True,
'Total Natural Gas Usage':True}
options_selected = {'Output Metric':'Annual CO2',
'Units':'Inch-Pound',
'Chart Type':'Vertical Column',
'Chart Sort Options':'Ascending',
'Show Cumulative Chart Slide':False,
'Show End Use Pie Chart Slide':True,
'Show End Use Monthly Chart Slide':False,
'Number of Rows Per Slide': '15',
'Tab Name':'CorrieResults',
'Columns of Values':columns_of_values_dict}
return options_selected
def construct_save_data(self):
save_data = {}
save_data['building'] = self.building_choice.GetString(self.building_choice.GetSelection())
save_data['frontFaces'] = self.front_faces_choice.GetString(self.front_faces_choice.GetSelection())
save_data['baselineCode'] = self.baseline_code_choice.GetString(self.baseline_code_choice.GetSelection())
#save_data['width'] = self.width_field.GetValue()
#save_data['depth'] = self.depth_field.GetValue()
save_data['powerpointPath'] = self.powerpoint_filepick.GetPath()
save_data['excelPath'] = self.excel_filepick.GetPath()
save_data['weatherPath'] = self.weather_filepick.GetPath()
occ_area_save_data = {}
for (label, text_control) in self.occ_areas_text_controls:
if label.GetLabel():
occ_area_save_data[label.GetLabel()] = text_control.GetValue()
save_data['occupancyAreas'] = occ_area_save_data
save_data['slideDetails'] = self.all_slide_details
slide_list_ctrl = self.slide_list.GetList()
slide_names_in_order = []
for index in range(slide_list_ctrl.GetCount()):
slide_names_in_order.append([slide_list_ctrl.GetString(index),slide_list_ctrl.IsChecked(index)])
save_data['slideOrder'] = slide_names_in_order
save_data['generalOptions'] = self.general_options_values
#print(json.dumps(save_data, indent=4))
return save_data
def handle_file_save(self, event):
#print("handle_file_save entered")
save_data = self.construct_save_data()
with open(self.current_file_name, 'w') as corrie_file:
json.dump(save_data, corrie_file, indent=4)
def handle_file_save_as(self, event):
with wx.FileDialog(self, "Save Corrie File", wildcard="Corrie files (*.corrie)|*.corrie",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
self.current_file_name = fileDialog.GetPath()
self.set_window_title_with_filename(self.current_file_name)
root, ext = os.path.splitext(self.current_file_name)
new_powerpoint_name = root + '-presentation.pptx'
self.powerpoint_filepick.SetPath(new_powerpoint_name)
save_data = self.construct_save_data()
with open(self.current_file_name, 'w') as corrie_file:
json.dump(save_data, corrie_file, indent=4)
def set_window_title_with_filename(self, filename):
self.SetTitle("Corrie" + ' - ' + filename)
def handle_file_open(self, event):
#if self.contentNotSaved:
# if wx.MessageBox("Current content has not been saved! Proceed?", "Please confirm",
# wx.ICON_QUESTION | wx.YES_NO, self) == wx.NO:
# return
#
with wx.FileDialog(self, "Open Corrie File", wildcard="Corrie files (*.corrie)|*.corrie",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
path_name = fileDialog.GetPath()
with open(path_name, 'r') as corrie_file:
load_data = json.load(corrie_file)
self.read_load_data(load_data)
self.current_file_name = path_name
self.set_window_title_with_filename(self.current_file_name)
def read_load_data(self, load_data):
print(json.dumps(load_data, indent=4))
self.building_choice.SetSelection(self.building_choice.FindString(load_data['building']))
self.front_faces_choice.SetSelection(self.front_faces_choice.FindString(load_data['frontFaces']))
self.baseline_code_choice.SetSelection(self.baseline_code_choice.FindString(load_data['baselineCode']))
#self.width_field.SetValue(load_data['width'])
#self.depth_field.SetValue(load_data['depth'])
self.powerpoint_filepick.SetPath(load_data['powerpointPath'])
self.excel_filepick.SetPath(load_data['excelPath'])
self.weather_filepick.SetPath(load_data['weatherPath'])
occupancy_area_data = load_data['occupancyAreas']
self.update_occ_areas(occupancy_area_data)
self.all_slide_details = load_data['slideDetails']
slide_list_ctrl = self.slide_list.GetList()
slide_list_ctrl.Clear()
slide_names_in_order = load_data['slideOrder']
for index, (slide_name, slide_checked) in enumerate(slide_names_in_order):
slide_list_ctrl.Append(slide_name)
slide_list_ctrl.Check(index, slide_checked)
slide_list_ctrl.SetSelection(0)
self.refresh_slide_list_details()
self.general_options_values = load_data['generalOptions']
def handle_run_simulation_button(self, event):
print('handle_run_simulation_button')
self.status_bar.SetStatusText('handle_run_simulation_button')
if self.current_file_name == 'untitled.corrie' or not self.powerpoint_filepick.GetPath() or not self.weather_filepick.GetPath():
wx.MessageBox('No simulation can be performed before: \n (1) saving the current file \n (2) choosing a PowerPoint file and \n (3) choosing a weather file.','Info', wx.OK)
return
if not self.run_simulation.weather_file_validation(self.weather_filepick.GetPath()):
wx.MessageBox('No simulation can be performed without a matching .epw, .stat, and .ddy file.','Info', wx.OK)
return
self.canceled = False
self.run_simulations_button.Disable()
self.cancel_simulations_button.Enable()
current_save_data = self.construct_save_data()
self.run_simulation.saved_data = current_save_data
self.run_simulation.set_current_file_name(self.current_file_name)
ex = futures.ThreadPoolExecutor(max_workers = 2)
# future = ex.submit(self.task) #works
# future = ex.submit(self.run_simulation.run_sim_task) #works
self.future = ex.submit(self.run_simulation.run_simulations)
self.future.add_done_callback(self.after_done)
return
def task(self):
subprocess.run(['C:/openstudio-2.8.0/bin/openstudio.exe', 'run', '-w',
'D:/SBIR/corrie-test-files/test-corrie-C_simulations/Roof_Insulation__R60.osw'],
cwd='D:/SBIR/corrie-test-files/test-corrie-C_simulations/')
def after_done(self, fn):
print(str(fn.result()))
# self.run_simulation.run_simulations()
self.run_simulations_button.Enable()
self.cancel_simulations_button.Disable()
if not self.canceled:
print('All simulations complete.')
self.status_bar.SetStatusText('All simulations complete.')
results = self.run_simulation.collect_results()
self.run_simulation.populate_powerpoint()
def handle_cancel_simulation_button(self, event):
if self.future:
self.run_simulation.terminate_process()
self.future.cancel()
self.run_simulations_button.Enable()
self.cancel_simulations_button.Disable()
self.status_bar.SetStatusText('Simulations Canceled.')
self.canceled = True
def print_standard_paths(self):
sp = wx.StandardPaths.Get()
print("AppDocumentsDir", sp.AppDocumentsDir)
print("ConfigDir", sp.ConfigDir)
print("DataDir", sp.DataDir)
print("DocumentsDir", sp.DocumentsDir)
print("ExecutablePath", sp.ExecutablePath)
print("InstallPrefix", sp.InstallPrefix)
print("LocalDataDir", sp.LocalDataDir)
print("PluginsDir", sp.PluginsDir)
print("ResourcesDir", sp.ResourcesDir)
print("TempDir", sp.TempDir)
print("UserConfigDir", sp.UserConfigDir)
print("UserDataDir", sp.UserDataDir)
print("UserLocalDataDir", sp.UserLocalDataDir)
print("current working directory", os.getcwd())
print("__file__", os.path.realpath(__file__))
def handle_building_choice_select(self, event):
building_selected = self.building_choice.GetString(self.building_choice.GetSelection())
# print(self.building_dict[building_selected].building_type)
self.baseline_code_choice.Clear()
codes_with_ashrae = ['ASHRAE ' + code for code in self.building_dict[building_selected].codes_available]
self.baseline_code_choice.AppendItems(codes_with_ashrae)
self.baseline_code_choice.SetSelection(0)
_, text_control = self.occ_areas_text_controls[0]  # get the current text control so we can use the existing value of the area
occupancy_areas = {}
occupancy_areas[self.building_dict[building_selected].display_string] = text_control.GetValue()
for other_building in self.building_dict[building_selected].other_buildings_available:
occupancy_areas[other_building] = 0
self.update_occ_areas(occupancy_areas)
def listener_update_statusbar(self, message):
self.status_bar.SetStatusText(message)
def handle_menu_option_regen_output(self, event):
print('handle_menu_option_regen_output')
self.status_bar.SetStatusText('handle_menu_option_regen_output')
current_save_data = self.construct_save_data()
self.run_simulation.saved_data = current_save_data
self.run_simulation.set_current_file_name(self.current_file_name)
results = self.run_simulation.collect_results()
print('results: ', results)
self.run_simulation.populate_powerpoint()
```
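
For reference, every entry built into `all_slide_details` above follows the same four-element layout: a selection-mode string, an include-in-incremental flag, a list of `[label, enabled, argument value]` options, and a list of `(measure name, argument name)` pairs identifying which OpenStudio measure argument each option value is applied to. A minimal sketch of one entry, using values taken from the 'Lighting Power Density' slide defined above:

```python
# Shape of a single all_slide_details entry:
#   [selection_mode, include_incremental, options_list, osw_list]
lighting_entry = [
    'Automatic',                          # how the preferred option is selected
    True,                                 # include this slide in the incremental results
    [["0.60 W/sqft", True, '0.60'],       # [label, enabled flag, measure argument value]
     ["0.80 W/sqft", True, '0.80']],
    [('SetLightingLoadsByLPD', 'lpd')],   # (measure name, measure argument name)
]
```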
#### File: corrie/interface/general_options.py
```python
import wx
class GeneralOptionsDialog(wx.Dialog):
CLOSE_SIGNAL_OK = 0
CLOSE_SIGNAL_CANCEL = 1
general_options_dict = None
output_metric_choice = None
units_choice = None
chart_type_choice = None
sort_option_choice = None
show_cumulative_checkbox = None
show_end_use_pie_checkbox = None
show_end_use_monthly_checkbox = None
num_rows_choice = None
tab_name_field = None
columns_of_values_list = None
def __init__(self, *args, **kwargs):
super(GeneralOptionsDialog, self).__init__(*args, **kwargs)
self.Bind(wx.EVT_CLOSE, self.handle_cancel_button)
self.initialize_ui()
def initialize_ui(self):
pnl = wx.Panel(self)
dialog_vbox = wx.BoxSizer(wx.VERTICAL)
top_hbox = wx.BoxSizer(wx.HORIZONTAL)
misc_option_vbox = wx.BoxSizer(wx.VERTICAL)
output_metric_hbox = wx.BoxSizer(wx.HORIZONTAL)
output_metric_label = wx.StaticText(pnl, label='Output Metric')
output_metric_options = ['Source Energy Use Intensity', 'Site Energy Use Intensity','Annual CO2']
self.output_metric_choice = wx.Choice(pnl, choices=output_metric_options)
output_metric_hbox.Add(output_metric_label, 0, wx.ALL, 5)
output_metric_hbox.Add(self.output_metric_choice, 1, wx.ALL, 5)
misc_option_vbox.Add(output_metric_hbox, 0, wx.ALL, 5)
units_hbox = wx.BoxSizer(wx.HORIZONTAL)
units_label = wx.StaticText(pnl, label='Units')
units_options = ['Inch-Pound', "SI (Metric)"]
self.units_choice = wx.Choice(pnl, choices=units_options)
units_hbox.Add(units_label, 0, wx.ALL, 5)
units_hbox.Add(self.units_choice, 1, wx.ALL, 5)
misc_option_vbox.Add(units_hbox, 0, wx.ALL, 5)
chart_type_hbox = wx.BoxSizer(wx.HORIZONTAL)
chart_type_label = wx.StaticText(pnl, label='Chart Type')
chart_type_options = ['Horizontal Bar', 'Vertical Column', 'Vertical Line']
self.chart_type_choice = wx.Choice(pnl, choices=chart_type_options)
chart_type_hbox.Add(chart_type_label, 0, wx.ALL, 5)
chart_type_hbox.Add(self.chart_type_choice, 1, wx.ALL, 5)
misc_option_vbox.Add(chart_type_hbox, 0, wx.ALL, 5)
sort_option_hbox = wx.BoxSizer(wx.HORIZONTAL)
sort_option_label = wx.StaticText(pnl, label='Chart Sort Options')
sort_option_options = ['No Sort', 'Ascending', 'Descending']
self.sort_option_choice = wx.Choice(pnl, choices=sort_option_options)
sort_option_hbox.Add(sort_option_label, 0, wx.ALL, 5)
sort_option_hbox.Add(self.sort_option_choice, 1, wx.ALL, 5)
misc_option_vbox.Add(sort_option_hbox, 0, wx.ALL, 5)
self.show_cumulative_checkbox = wx.CheckBox(pnl, label='Show Cumulative Chart Slide')
misc_option_vbox.Add(self.show_cumulative_checkbox, 0, wx.ALL, 5)
self.show_end_use_pie_checkbox = wx.CheckBox(pnl, label='Show End Use Pie Chart Slide')
misc_option_vbox.Add(self.show_end_use_pie_checkbox, 0, wx.ALL, 5)
self.show_end_use_monthly_checkbox = wx.CheckBox(pnl, label='Show End Use Monthly Chart Slide')
misc_option_vbox.Add(self.show_end_use_monthly_checkbox, 0, wx.ALL, 5)
top_hbox.Add(misc_option_vbox, 0, wx.ALL, 5)
excel_box = wx.StaticBox(pnl, -1, "Excel")
top_border, other_border = excel_box.GetBordersForSizer()
excel_sizer = wx.BoxSizer(wx.VERTICAL)
excel_sizer.AddSpacer(top_border)
num_rows_hbox = wx.BoxSizer(wx.HORIZONTAL)
num_rows_label = wx.StaticText(excel_box, label='Number of Rows Per "Slide"')
num_rows_options = ['10','15','20','25','30','35','40','45','50']
self.num_rows_choice = wx.Choice(excel_box, choices=num_rows_options)
num_rows_hbox.Add(num_rows_label, 0, wx.ALL, 5)
num_rows_hbox.Add(self.num_rows_choice, 1, wx.ALL, 5)
excel_sizer.Add(num_rows_hbox, 0, wx.ALL, 5)
tab_name_hbox = wx.BoxSizer(wx.HORIZONTAL)
tab_name_label = wx.StaticText(excel_box, label='Tab Name')
self.tab_name_field = wx.TextCtrl(excel_box)
tab_name_hbox.Add(tab_name_label, 0, wx.ALL, 5)
tab_name_hbox.Add(self.tab_name_field, 1, wx.ALL, 5)
excel_sizer.Add(tab_name_hbox, 0, wx.ALL, 5)
columns_of_values = wx.StaticText(excel_box, label='Columns of Values')
excel_sizer.Add(columns_of_values, 0, wx.ALL, 5)
columns_of_values_list = ['-' for x in range(8)]
self.columns_of_values_list = wx.CheckListBox(excel_box, 1, (80, 50), wx.DefaultSize, columns_of_values_list)
excel_sizer.Add(self.columns_of_values_list, 1, wx.ALL | wx.EXPAND, 10)
excel_box.SetSizer(excel_sizer)
top_hbox.Add(excel_box, 0, wx.ALL, 5)
dialog_vbox.Add(top_hbox, 0, wx.ALL , 5)
ok_cancel_hbox = wx.BoxSizer(wx.HORIZONTAL)
ok_button = wx.Button(pnl, wx.ID_OK , "OK", size=(60, 30))
ok_button.Bind(wx.EVT_BUTTON, self.handle_ok_button)
cancel_button = wx.Button(pnl, wx.ID_CANCEL, "Cancel", size=(60, 30))
cancel_button.Bind(wx.EVT_BUTTON, self.handle_cancel_button)
ok_cancel_hbox.Add(ok_button, 0, wx.ALL, 5)
ok_cancel_hbox.Add(cancel_button, 0, wx.ALL, 5)
dialog_vbox.Add(ok_cancel_hbox, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
pnl.SetSizer(dialog_vbox)
pnl.Fit()
self.Fit()
self.SetTitle("General Options")
def set_parameters(self, general_options_dict):
self.general_options_dict = general_options_dict
selected_index = self.output_metric_choice.FindString(self.general_options_dict['Output Metric'])
self.output_metric_choice.SetSelection(selected_index)
selected_index = self.chart_type_choice.FindString(self.general_options_dict['Chart Type'])
self.chart_type_choice.SetSelection(selected_index)
selected_index = self.units_choice.FindString(self.general_options_dict['Units'])
self.units_choice.SetSelection(selected_index)
selected_index = self.sort_option_choice.FindString(self.general_options_dict['Chart Sort Options'])
self.sort_option_choice.SetSelection(selected_index)
self.show_cumulative_checkbox.SetValue(self.general_options_dict['Show Cumulative Chart Slide'])
self.show_end_use_pie_checkbox.SetValue(self.general_options_dict['Show End Use Pie Chart Slide'])
self.show_end_use_monthly_checkbox.SetValue(self.general_options_dict['Show End Use Monthly Chart Slide'])
selected_index = self.num_rows_choice.FindString(self.general_options_dict['Number of Rows Per Slide'])
self.num_rows_choice.SetSelection(selected_index)
self.tab_name_field.SetValue(self.general_options_dict['Tab Name'])
columns_of_values_dict = self.general_options_dict['Columns of Values']
list_of_columns = list(columns_of_values_dict.keys())
self.columns_of_values_list.Set(list_of_columns)
for column_item in list_of_columns:
index = self.columns_of_values_list.FindString(column_item)
self.columns_of_values_list.Check(index, columns_of_values_dict[column_item])
def handle_ok_button(self, event):
self.save_parameters()
self.EndModal(self.CLOSE_SIGNAL_OK)
def save_parameters(self):
self.general_options_dict.clear()
selected_index = self.output_metric_choice.GetSelection()
self.general_options_dict['Output Metric'] = self.output_metric_choice.GetString(selected_index)
selected_index = self.chart_type_choice.GetSelection()
self.general_options_dict['Chart Type'] = self.chart_type_choice.GetString(selected_index)
selected_index = self.units_choice.GetSelection()
self.general_options_dict['Units'] = self.units_choice.GetString(selected_index)
selected_index = self.sort_option_choice.GetSelection()
self.general_options_dict['Chart Sort Options'] = self.sort_option_choice.GetString(selected_index)
self.general_options_dict['Show Cumulative Chart Slide']= self.show_cumulative_checkbox.GetValue()
self.general_options_dict['Show End Use Pie Chart Slide']= self.show_end_use_pie_checkbox.GetValue()
self.general_options_dict['Show End Use Monthly Chart Slide']= self.show_end_use_monthly_checkbox.GetValue()
selected_index = self.num_rows_choice.GetSelection()
self.general_options_dict['Number of Rows Per Slide'] = self.num_rows_choice.GetString(selected_index)
self.general_options_dict['Tab Name'] = self.tab_name_field.GetValue()
columns_of_values_dict = {}
list_of_columns = self.columns_of_values_list.GetItems()
for column_item in list_of_columns:
index = self.columns_of_values_list.FindString(column_item)
columns_of_values_dict[column_item] = self.columns_of_values_list.IsChecked(index)
self.general_options_dict['Columns of Values'] = columns_of_values_dict
def handle_cancel_button(self, event):
self.EndModal(self.CLOSE_SIGNAL_CANCEL)
``` |
{
"source": "jmarrec/EnergyPlusRegressionTool",
"score": 2
} |
#### File: epregressions/builds/makefile.py
```python
import os
from epregressions.builds.base import BaseBuildDirectoryStructure
from epregressions.ep_platform import exe_extension
class CMakeCacheMakeFileBuildDirectory(BaseBuildDirectoryStructure):
def __init__(self):
super(CMakeCacheMakeFileBuildDirectory, self).__init__()
self.source_directory = None
def set_build_directory(self, build_directory):
"""
This method takes a build directory, and updates any dependent member variables, in this case the source dir.
This method *does* allow an invalid build_directory, as could happen during program initialization
:param build_directory:
:return:
"""
self.build_directory = build_directory
if not os.path.exists(self.build_directory):
self.source_directory = 'unknown - invalid build directory?'
return
cmake_cache_file = os.path.join(self.build_directory, 'CMakeCache.txt')
if not os.path.exists(cmake_cache_file):
raise Exception('Could not find cache file in build directory')
with open(cmake_cache_file, 'r') as f_cache:
for this_line in f_cache.readlines():
if 'CMAKE_HOME_DIRECTORY:INTERNAL=' in this_line:
tokens = this_line.strip().split('=')
self.source_directory = tokens[1]
break
else:
raise Exception('Could not find source directory spec in the CMakeCache file')
def get_idf_directory(self):
if not self.build_directory:
raise Exception('Build directory has not been set with set_build_directory()')
return os.path.join(self.source_directory, 'testfiles')
def get_build_tree(self):
if not self.build_directory:
raise Exception('Build directory has not been set with set_build_directory()')
this_exe_ext = exe_extension()
return {
'build_dir': self.build_directory,
'source_dir': self.source_directory,
'energyplus': os.path.join(self.build_directory, 'Products', 'energyplus' + this_exe_ext),
'basement': os.path.join(self.build_directory, 'Products', 'Basement' + this_exe_ext),
'idd_path': os.path.join(self.build_directory, 'Products', 'Energy+.idd'),
'slab': os.path.join(self.build_directory, 'Products', 'Slab' + this_exe_ext),
'basementidd': os.path.join(self.build_directory, 'Products', 'BasementGHT.idd'),
'slabidd': os.path.join(self.build_directory, 'Products', 'SlabGHT.idd'),
'expandobjects': os.path.join(self.build_directory, 'Products', 'ExpandObjects' + this_exe_ext),
'epmacro': os.path.join(self.source_directory, 'bin', 'EPMacro', 'Linux', 'EPMacro' + this_exe_ext),
'readvars': os.path.join(self.build_directory, 'Products', 'ReadVarsESO' + this_exe_ext),
'parametric': os.path.join(self.build_directory, 'Products', 'ParametricPreprocessor' + this_exe_ext),
'test_files_dir': os.path.join(self.source_directory, 'testfiles'),
'weather_dir': os.path.join(self.source_directory, 'weather'),
'data_sets_dir': os.path.join(self.source_directory, 'datasets')
}
```
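
A minimal usage sketch of the class above; the build path is hypothetical, and the only requirement is that the directory contain the `CMakeCache.txt` written by CMake:

```python
from epregressions.builds.makefile import CMakeCacheMakeFileBuildDirectory

build = CMakeCacheMakeFileBuildDirectory()
build.set_build_directory('/path/to/EnergyPlus/build')  # hypothetical build directory
tree = build.get_build_tree()       # paths derived from CMAKE_HOME_DIRECTORY and the build dir
print(tree['energyplus'])           # <build>/Products/energyplus plus the platform exe extension
print(build.get_idf_directory())    # <source>/testfiles
```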
#### File: epregressions/builds/visualstudio.py
```python
import os
from epregressions.builds.base import BaseBuildDirectoryStructure
class CMakeCacheVisualStudioBuildDirectory(BaseBuildDirectoryStructure):
"""
A Visual Studio based build directory class
This tries to use a "Release" folder, but if it does not exist it tries to fall back to a "Debug" folder
"""
def __init__(self):
super(CMakeCacheVisualStudioBuildDirectory, self).__init__()
self.source_directory = None
self.build_mode = 'Release'
def set_build_mode(self, debug):
self.build_mode = 'Debug' if debug else 'Release'
def set_build_directory(self, build_directory):
"""
This method takes a build directory, and updates any dependent member variables, in this case the source dir.
This method *does* allow an invalid build_directory, as could happen during program initialization
:param build_directory:
:return:
"""
self.build_directory = build_directory
if not os.path.exists(self.build_directory):
self.source_directory = 'unknown - invalid build directory?'
return
cmake_cache_file = os.path.join(self.build_directory, 'CMakeCache.txt')
with open(cmake_cache_file, 'r') as f_cache:
for this_line in f_cache.readlines():
if 'CMAKE_HOME_DIRECTORY:INTERNAL=' in this_line:
tokens = this_line.strip().split('=')
self.source_directory = tokens[1]
break
else:
raise Exception('Could not find source directory spec in the CMakeCache file')
build_mode_folder = 'Release'
release_folder = os.path.join(self.build_directory, 'Products', build_mode_folder)
release_folder_exists = os.path.exists(release_folder)
if release_folder_exists:
self.set_build_mode(debug=False)
else:
self.set_build_mode(debug=True)
def get_idf_directory(self):
if not self.build_directory:
raise Exception('Build directory has not been set with set_build_directory()')
return os.path.join(self.source_directory, 'testfiles')
def get_build_tree(self):
if not self.build_directory:
raise Exception('Build directory has not been set with set_build_directory()')
return {
'build_dir': self.build_directory,
'source_dir': self.source_directory,
'energyplus': os.path.join(self.build_directory, 'Products', self.build_mode, 'energyplus.exe'),
'basement': os.path.join(self.build_directory, 'Products', 'Basement.exe'),
'idd_path': os.path.join(self.build_directory, 'Products', 'Energy+.idd'),
'slab': os.path.join(self.build_directory, 'Products', 'Slab.exe'),
'basementidd': os.path.join(self.build_directory, 'Products', 'BasementGHT.idd'),
'slabidd': os.path.join(self.build_directory, 'Products', 'SlabGHT.idd'),
'expandobjects': os.path.join(self.build_directory, 'Products', 'ExpandObjects.exe'),
'epmacro': os.path.join(self.source_directory, 'bin', 'EPMacro', 'Linux', 'EPMacro.exe'),
'readvars': os.path.join(self.build_directory, 'Products', 'ReadVarsESO.exe'),
'parametric': os.path.join(self.build_directory, 'Products', 'ParametricPreprocessor.exe'),
'test_files_dir': os.path.join(self.source_directory, 'testfiles'),
'weather_dir': os.path.join(self.source_directory, 'weather'),
'data_sets_dir': os.path.join(self.source_directory, 'datasets')
}
```
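
A similarly brief sketch for the Visual Studio layout above, showing the Release/Debug fallback (build path hypothetical):

```python
from epregressions.builds.visualstudio import CMakeCacheVisualStudioBuildDirectory

build = CMakeCacheVisualStudioBuildDirectory()
build.set_build_directory(r'C:\EnergyPlus\build')    # hypothetical; must contain CMakeCache.txt
# build_mode is 'Release' when Products/Release exists, otherwise 'Debug'
print(build.build_mode)
print(build.get_build_tree()['energyplus'])          # .../Products/<build_mode>/energyplus.exe
```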
#### File: EnergyPlusRegressionTool/epregressions/energyplus.py
```python
import glob
import os
import shutil
import subprocess
from epregressions.structures import ForceRunType
path = os.path.dirname(__file__)
script_dir = os.path.abspath(path)
class ExecutionArguments:
def __init__(self, build_tree, entry_name, test_run_directory,
run_type, min_reporting_freq, this_parametric_file, weather_file_name):
self.build_tree = build_tree
self.entry_name = entry_name
self.test_run_directory = test_run_directory
self.run_type = run_type
self.min_reporting_freq = min_reporting_freq
self.this_parametric_file = this_parametric_file
self.weather_file_name = weather_file_name
# noinspection PyBroadException
def execute_energyplus(e_args: ExecutionArguments):
# setup a few paths
energyplus = e_args.build_tree['energyplus']
basement = e_args.build_tree['basement']
idd_path = e_args.build_tree['idd_path']
slab = e_args.build_tree['slab']
basementidd = e_args.build_tree['basementidd']
slabidd = e_args.build_tree['slabidd']
expandobjects = e_args.build_tree['expandobjects']
epmacro = e_args.build_tree['epmacro']
readvars = e_args.build_tree['readvars']
parametric = e_args.build_tree['parametric']
# Save the current path so we can go back here
start_path = os.getcwd()
std_out = b""
std_err = b""
try:
new_idd_path = os.path.join(e_args.test_run_directory, 'Energy+.idd')
shutil.copy(idd_path, new_idd_path)
# Copy the weather file into the simulation directory
if e_args.weather_file_name:
shutil.copy(e_args.weather_file_name, os.path.join(e_args.test_run_directory, 'in.epw'))
# Switch to the simulation directory
os.chdir(e_args.test_run_directory)
# Run EPMacro as necessary
if os.path.exists('in.imf'):
with open('in.imf', 'rb') as f:
lines = f.readlines()
newlines = []
for line in lines:
encoded_line = line.decode('UTF-8', 'ignore')
if '##fileprefix' in encoded_line:
newlines.append('')
else:
newlines.append(encoded_line)
with open('in.imf', 'w') as f:
for line in newlines:
f.write(line)
macro_run = subprocess.Popen(
epmacro, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
o, e = macro_run.communicate()
std_out += o
std_err += e
os.rename('out.idf', 'in.idf')
# Run Preprocessor -- after EPMacro?
if e_args.this_parametric_file:
parametric_run = subprocess.Popen(
parametric + ' in.idf', shell=True, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
o, e = parametric_run.communicate()
std_out += o
std_err += e
candidate_files = glob.glob('in-*.idf')
if len(candidate_files) > 0:
file_to_run_here = sorted(candidate_files)[0]
if os.path.exists('in.idf'):
os.remove('in.idf')
os.rename(file_to_run_here, 'in.idf')
else:
return [e_args.build_tree['build_dir'], e_args.entry_name, False, False, "Issue with Parametrics"]
# Run ExpandObjects and process as necessary
expand_objects_run = subprocess.Popen(
expandobjects, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
o, e = expand_objects_run.communicate()
std_out += o
std_err += e
if os.path.exists('expanded.idf'):
if os.path.exists('in.idf'):
os.remove('in.idf')
os.rename('expanded.idf', 'in.idf')
if os.path.exists('BasementGHTIn.idf'):
shutil.copy(basementidd, e_args.test_run_directory)
basement_environment = os.environ.copy()
basement_environment['CI_BASEMENT_NUMYEARS'] = '2'
basement_run = subprocess.Popen(
basement, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=basement_environment
)
o, e = basement_run.communicate()
std_out += o
std_err += e
with open('EPObjects.TXT') as f:
append_text = f.read()
with open('in.idf', 'a') as f:
f.write("\n%s\n" % append_text)
os.remove('RunINPUT.TXT')
os.remove('RunDEBUGOUT.TXT')
os.remove('EPObjects.TXT')
os.remove('BasementGHTIn.idf')
os.remove('MonthlyResults.csv')
os.remove('BasementGHT.idd')
if os.path.exists('GHTIn.idf'):
shutil.copy(slabidd, e_args.test_run_directory)
slab_run = subprocess.Popen(
slab, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
o, e = slab_run.communicate()
std_out += o
std_err += e
with open('SLABSurfaceTemps.TXT') as f:
append_text = f.read()
with open('in.idf', 'a') as f:
f.write("\n%s\n" % append_text)
os.remove('SLABINP.TXT')
os.remove('GHTIn.idf')
os.remove('SLABSurfaceTemps.TXT')
os.remove('SLABSplit Surface Temps.TXT')
os.remove('SlabGHT.idd')
# Set up environment
os.environ["DISPLAYADVANCEDREPORTVARIABLES"] = "YES"
os.environ["DISPLAYALLWARNINGS"] = "YES"
if e_args.run_type == ForceRunType.DD:
os.environ["DDONLY"] = "Y"
os.environ["REVERSEDD"] = ""
os.environ["FULLANNUALRUN"] = ""
elif e_args.run_type == ForceRunType.ANNUAL:
os.environ["DDONLY"] = ""
os.environ["REVERSEDD"] = ""
os.environ["FULLANNUALRUN"] = "Y"
elif e_args.run_type == ForceRunType.NONE:
os.environ["DDONLY"] = ""
os.environ["REVERSEDD"] = ""
os.environ["FULLANNUALRUN"] = ""
else:
pass
# nothing
# use the user-entered minimum reporting frequency
# (useful for limiting to daily outputs for annual simulation, etc.)
os.environ["MINREPORTFREQUENCY"] = e_args.min_reporting_freq.upper()
# Execute EnergyPlus
try:
std_out += subprocess.check_output(
energyplus, shell=True, stdin=subprocess.DEVNULL, stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e: # pragma: no cover
...
# so I can verify that I hit this during the test_case_b_crash test, but if I just have the return in
# here alone, it shows as missing on the coverage...wonky
return [e_args.build_tree['build_dir'], e_args.entry_name, False, False, str(e)]
# Execute readvars
if os.path.exists('in.rvi'):
csv_run = subprocess.Popen(
readvars + ' in.rvi', shell=True, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
else:
csv_run = subprocess.Popen(
readvars, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
o, e = csv_run.communicate()
std_out += o
std_err += e
if os.path.exists('in.mvi'):
mtr_run = subprocess.Popen(
readvars + ' in.mvi', shell=True, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
else:
with open('in.mvi', 'w') as f:
f.write("eplusout.mtr\n")
f.write("eplusmtr.csv\n")
mtr_run = subprocess.Popen(
readvars + ' in.mvi', shell=True, stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
o, e = mtr_run.communicate()
std_out += o
std_err += e
if len(std_out) > 0:
with open('eplusout.stdout', 'w') as f:
f.write(std_out.decode('utf-8'))
if len(std_err) > 0:
with open('eplusout.stderr', 'w') as f:
f.write(std_err.decode('utf-8'))
os.remove(new_idd_path)
return [e_args.build_tree['build_dir'], e_args.entry_name, True, False]
except Exception as e:
return [e_args.build_tree['build_dir'], e_args.entry_name, False, False, str(e)]
finally:
os.chdir(start_path)
```
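
A rough sketch of driving the function above; directory and weather-file paths are hypothetical, and the run directory is assumed to already contain the `in.idf` (or `in.imf`) to simulate, since the function only copies in the IDD and weather file:

```python
from epregressions.builds.makefile import CMakeCacheMakeFileBuildDirectory
from epregressions.energyplus import ExecutionArguments, execute_energyplus
from epregressions.structures import ForceRunType

build = CMakeCacheMakeFileBuildDirectory()
build.set_build_directory('/path/to/EnergyPlus/build')        # hypothetical

args = ExecutionArguments(
    build_tree=build.get_build_tree(),
    entry_name='1ZoneUncontrolled',                           # hypothetical test file name
    test_run_directory='/tmp/regressions/1ZoneUncontrolled',  # hypothetical; holds in.idf
    run_type=ForceRunType.DD,
    min_reporting_freq='Detailed',
    this_parametric_file=False,
    weather_file_name='/path/to/weather.epw',                 # hypothetical
)
result = execute_energyplus(args)
# On success: [build_dir, entry_name, True, False]; on failure a fifth element carries the error message
```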
#### File: EnergyPlusRegressionTool/epregressions/runner.py
```python
from multiprocessing import set_start_method
from platform import system
from sys import argv
def main_gui():
from epregressions.tk_window import MyApp
app = MyApp()
app.run()
if __name__ == "__main__":
if system() == 'Darwin':
set_start_method('forkserver')
if len(argv) == 1: # GUI
main_gui()
else: # Non-GUI operation, execute some command
...
```
#### File: tests/builds/test_base.py
```python
import os
import tempfile
import unittest
from epregressions.builds.base import BaseBuildDirectoryStructure, autodetect_build_dir_type, KnownBuildTypes
class TestAutoDetectBuildType(unittest.TestCase):
def setUp(self):
self.build_dir = tempfile.mkdtemp()
def add_cache_file(self, content):
cache_file = os.path.join(self.build_dir, 'CMakeCache.txt')
with open(cache_file, 'w') as f:
f.write(content)
def add_subdirectory(self, dir_name):
os.makedirs(os.path.join(self.build_dir, dir_name))
def test_empty_dir_is_unknown(self):
self.assertEqual(KnownBuildTypes.Unknown, autodetect_build_dir_type(self.build_dir))
def test_detect_install(self):
self.add_subdirectory('ExampleFiles')
self.assertEqual(KnownBuildTypes.Installation, autodetect_build_dir_type(self.build_dir))
def test_detect_makefile(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Unix Makefiles')
self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))
def test_detect_visual_studio(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Visual Studio 2019')
self.assertEqual(KnownBuildTypes.VisualStudio, autodetect_build_dir_type(self.build_dir))
def test_detect_ninja(self):
self.add_cache_file('CMAKE_GENERATOR:INTERNAL=Ninja')
self.assertEqual(KnownBuildTypes.Makefile, autodetect_build_dir_type(self.build_dir))
class TestBaseBuildMethods(unittest.TestCase):
def setUp(self):
self.base_build = BaseBuildDirectoryStructure()
def test_set_build_directory_abstract(self):
with self.assertRaises(NotImplementedError):
self.base_build.set_build_directory('hello')
def test_get_build_tree_abstract(self):
with self.assertRaises(NotImplementedError):
self.base_build.get_build_tree()
def test_get_idf_directory(self):
with self.assertRaises(NotImplementedError):
self.base_build.get_idf_directory()
def test_verify_without_setting_build_dir(self):
with self.assertRaises(Exception):
self.base_build.verify()
def test_get_idfs(self):
temp_idf_dir = tempfile.mkdtemp()
self.assertSetEqual(set(), self.base_build.get_idfs_in_dir(temp_idf_dir))
with open(os.path.join(temp_idf_dir, 'file1.idf'), 'w') as f:
f.write('hi')
with open(os.path.join(temp_idf_dir, 'file2.iQQ'), 'w') as f:
f.write('he')
with open(os.path.join(temp_idf_dir, 'file3.idf'), 'w') as f:
f.write('ha')
with open(os.path.join(temp_idf_dir, 'file4.imf'), 'w') as f:
f.write('ha') # macro
with open(os.path.join(temp_idf_dir, '_ExternalInterface-actuator.idf'), 'w') as f:
f.write('ha') # ext interface as FMU
with open(os.path.join(temp_idf_dir, 'HVAC3ZoneGeometry.imf'), 'w') as f:
f.write('ha') # macro resource file
# TODO: Modify the test to expect relevant IMF files as well and fix the function
self.assertEqual(3, len(self.base_build.get_idfs_in_dir(temp_idf_dir)))
``` |
{
"source": "jmarrec/idf-tags",
"score": 2
} |
#### File: idf-tags/idftags/idf_tag.py
```python
import os
import glob as gb
import sys  # for compatibility
# Because find_non_reference_classes relies on Eppy, it's slow, so I hardcode
# the class names that aren't reference objects.
NOT_REFERENCE_CLASSES = [
'AIRCONDITIONER:VARIABLEREFRIGERANTFLOW:FLUIDTEMPERATURECONTROL',
'AIRCONDITIONER:VARIABLEREFRIGERANTFLOW:FLUIDTEMPERATURECONTROL:HR',
'AIRFLOWNETWORK:DISTRIBUTION:DUCTVIEWFACTORS',
'AIRFLOWNETWORK:DISTRIBUTION:LINKAGE',
'AIRFLOWNETWORK:MULTIZONE:SURFACE',
'AIRFLOWNETWORK:SIMULATIONCONTROL',
'AIRLOOPHVAC:RETURNPATH',
'AIRLOOPHVAC:SUPPLYPATH',
'BUILDING',
'COIL:WATERHEATING:DESUPERHEATER',
'COMPLEXFENESTRATIONPROPERTY:SOLARABSORBEDLAYERS',
'COMPLIANCE:BUILDING',
'COMPONENTCOST:ADJUSTMENTS',
'COMPONENTCOST:LINEITEM',
'COMPONENTCOST:REFERENCE',
'CONNECTOR:MIXER',
'CONNECTOR:SPLITTER',
'CONVERGENCELIMITS',
'CURRENCYTYPE',
'DAYLIGHTING:CONTROLS',
'DAYLIGHTING:DELIGHT:COMPLEXFENESTRATION',
'DAYLIGHTINGDEVICE:LIGHTWELL',
'DAYLIGHTINGDEVICE:SHELF',
'DAYLIGHTINGDEVICE:TUBULAR',
'DEMANDMANAGERASSIGNMENTLIST',
'ELECTRICEQUIPMENT:ITE:AIRCOOLED',
'ELECTRICLOADCENTER:DISTRIBUTION',
'ENERGYMANAGEMENTSYSTEM:ACTUATOR',
'ENERGYMANAGEMENTSYSTEM:CONSTRUCTIONINDEXVARIABLE',
'ENERGYMANAGEMENTSYSTEM:CURVEORTABLEINDEXVARIABLE',
'ENERGYMANAGEMENTSYSTEM:GLOBALVARIABLE',
'ENERGYMANAGEMENTSYSTEM:INTERNALVARIABLE',
'ENERGYMANAGEMENTSYSTEM:METEREDOUTPUTVARIABLE',
'ENERGYMANAGEMENTSYSTEM:OUTPUTVARIABLE',
'ENERGYMANAGEMENTSYSTEM:SENSOR',
'ENERGYMANAGEMENTSYSTEM:TRENDVARIABLE',
'ENVIRONMENTALIMPACTFACTORS',
'EXTERIOR:FUELEQUIPMENT',
'EXTERIOR:WATEREQUIPMENT',
'EXTERNALINTERFACE',
'EXTERNALINTERFACE:ACTUATOR',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITEXPORT:FROM:VARIABLE',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITEXPORT:TO:ACTUATOR',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITEXPORT:TO:VARIABLE',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITIMPORT:FROM:VARIABLE',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITIMPORT:TO:ACTUATOR',
'EXTERNALINTERFACE:FUNCTIONALMOCKUPUNITIMPORT:TO:VARIABLE',
'EXTERNALINTERFACE:VARIABLE',
'FANPERFORMANCE:NIGHTVENTILATION',
'FAULTMODEL:ENTHALPYSENSOROFFSET:OUTDOORAIR',
'FAULTMODEL:ENTHALPYSENSOROFFSET:RETURNAIR',
'FAULTMODEL:FOULING:AIRFILTER',
'FAULTMODEL:FOULING:BOILER',
'FAULTMODEL:FOULING:CHILLER',
'FAULTMODEL:FOULING:COIL',
'FAULTMODEL:FOULING:COOLINGTOWER',
'FAULTMODEL:FOULING:EVAPORATIVECOOLER',
'FAULTMODEL:HUMIDISTATOFFSET',
'FAULTMODEL:HUMIDITYSENSOROFFSET:OUTDOORAIR',
'FAULTMODEL:PRESSURESENSOROFFSET:OUTDOORAIR',
'FAULTMODEL:TEMPERATURESENSOROFFSET:CHILLERSUPPLYWATER',
'FAULTMODEL:TEMPERATURESENSOROFFSET:COILSUPPLYAIR',
'FAULTMODEL:TEMPERATURESENSOROFFSET:CONDENSERSUPPLYWATER',
'FAULTMODEL:TEMPERATURESENSOROFFSET:OUTDOORAIR',
'FAULTMODEL:TEMPERATURESENSOROFFSET:RETURNAIR',
'FLUIDPROPERTIES:CONCENTRATION',
'FLUIDPROPERTIES:GLYCOLCONCENTRATION',
'FLUIDPROPERTIES:SATURATED',
'FLUIDPROPERTIES:SUPERHEATED',
'FOUNDATION:KIVA:SETTINGS',
'FUELFACTORS',
'GASEQUIPMENT',
'GEOMETRYTRANSFORM',
'GLOBALGEOMETRYRULES',
'GROUNDHEATTRANSFER:BASEMENT:AUTOGRID',
'GROUNDHEATTRANSFER:BASEMENT:BLDGDATA',
'GROUNDHEATTRANSFER:BASEMENT:COMBLDG',
'GROUNDHEATTRANSFER:BASEMENT:EQUIVAUTOGRID',
'GROUNDHEATTRANSFER:BASEMENT:EQUIVSLAB',
'GROUNDHEATTRANSFER:BASEMENT:INSULATION',
'GROUNDHEATTRANSFER:BASEMENT:INTERIOR',
'GROUNDHEATTRANSFER:BASEMENT:MANUALGRID',
'GROUNDHEATTRANSFER:BASEMENT:MATLPROPS',
'GROUNDHEATTRANSFER:BASEMENT:SIMPARAMETERS',
'GROUNDHEATTRANSFER:BASEMENT:SURFACEPROPS',
'GROUNDHEATTRANSFER:BASEMENT:XFACE',
'GROUNDHEATTRANSFER:BASEMENT:YFACE',
'GROUNDHEATTRANSFER:BASEMENT:ZFACE',
'GROUNDHEATTRANSFER:CONTROL',
'GROUNDHEATTRANSFER:SLAB:AUTOGRID',
'GROUNDHEATTRANSFER:SLAB:BLDGPROPS',
'GROUNDHEATTRANSFER:SLAB:BOUNDCONDS',
'GROUNDHEATTRANSFER:SLAB:EQUIVALENTSLAB',
'GROUNDHEATTRANSFER:SLAB:INSULATION',
'GROUNDHEATTRANSFER:SLAB:MANUALGRID',
'GROUNDHEATTRANSFER:SLAB:MATERIALS',
'GROUNDHEATTRANSFER:SLAB:MATLPROPS',
'GROUNDHEATTRANSFER:SLAB:XFACE',
'GROUNDHEATTRANSFER:SLAB:YFACE',
'GROUNDHEATTRANSFER:SLAB:ZFACE',
'HEATBALANCEALGORITHM',
'HEATBALANCESETTINGS:CONDUCTIONFINITEDIFFERENCE',
'HOTWATEREQUIPMENT',
'HVACTEMPLATE:PLANT:BOILER',
'HVACTEMPLATE:PLANT:BOILER:OBJECTREFERENCE',
'HVACTEMPLATE:PLANT:CHILLEDWATERLOOP',
'HVACTEMPLATE:PLANT:CHILLER',
'HVACTEMPLATE:PLANT:CHILLER:OBJECTREFERENCE',
'HVACTEMPLATE:PLANT:HOTWATERLOOP',
'HVACTEMPLATE:PLANT:MIXEDWATERLOOP',
'HVACTEMPLATE:PLANT:TOWER',
'HVACTEMPLATE:PLANT:TOWER:OBJECTREFERENCE',
'HVACTEMPLATE:ZONE:BASEBOARDHEAT',
'HVACTEMPLATE:ZONE:DUALDUCT',
'HVACTEMPLATE:ZONE:FANCOIL',
'HVACTEMPLATE:ZONE:IDEALLOADSAIRSYSTEM',
'HVACTEMPLATE:ZONE:PTAC',
'HVACTEMPLATE:ZONE:PTHP',
'HVACTEMPLATE:ZONE:UNITARY',
'HVACTEMPLATE:ZONE:VAV',
'HVACTEMPLATE:ZONE:VAV:FANPOWERED',
'HVACTEMPLATE:ZONE:VAV:HEATANDCOOL',
'HVACTEMPLATE:ZONE:VRF',
'HVACTEMPLATE:ZONE:WATERTOAIRHEATPUMP',
'HYBRIDMODEL:ZONE',
'LEAD INPUT',
'LIFECYCLECOST:NONRECURRINGCOST',
'LIFECYCLECOST:PARAMETERS',
'LIFECYCLECOST:RECURRINGCOSTS',
'LIFECYCLECOST:USEADJUSTMENT',
'LIFECYCLECOST:USEPRICEESCALATION',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:DIFFUSION',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:REDISTRIBUTION',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:SETTINGS',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:SORPTIONISOTHERM',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:SUCTION',
'MATERIALPROPERTY:HEATANDMOISTURETRANSFER:THERMALCONDUCTIVITY',
'MATERIALPROPERTY:MOISTUREPENETRATIONDEPTH:SETTINGS',
'MATERIALPROPERTY:PHASECHANGE',
'MATERIALPROPERTY:VARIABLETHERMALCONDUCTIVITY',
'METER:CUSTOM',
'METER:CUSTOMDECREMENT',
'NODELIST',
'OTHEREQUIPMENT',
'OUTDOORAIR:NODE',
'OUTDOORAIR:NODELIST',
'OUTPUT:CONSTRUCTIONS',
'OUTPUT:DAYLIGHTFACTORS',
'OUTPUT:DEBUGGINGDATA',
'OUTPUT:DIAGNOSTICS',
'OUTPUT:ENERGYMANAGEMENTSYSTEM',
'OUTPUT:ENVIRONMENTALIMPACTFACTORS',
'OUTPUT:ILLUMINANCEMAP',
'OUTPUT:METER',
'OUTPUT:METER:CUMULATIVE',
'OUTPUT:METER:CUMULATIVE:METERFILEONLY',
'OUTPUT:METER:METERFILEONLY',
'OUTPUT:PREPROCESSORMESSAGE',
'OUTPUT:SCHEDULES',
'OUTPUT:SQLITE',
'OUTPUT:SURFACES:DRAWING',
'OUTPUT:SURFACES:LIST',
'OUTPUT:TABLE:ANNUAL',
'OUTPUT:TABLE:MONTHLY',
'OUTPUT:TABLE:SUMMARYREPORTS',
'OUTPUT:TABLE:TIMEBINS',
'OUTPUT:VARIABLE',
'OUTPUT:VARIABLEDICTIONARY',
'OUTPUTCONTROL:ILLUMINANCEMAP:STYLE',
'OUTPUTCONTROL:REPORTINGTOLERANCES',
'OUTPUTCONTROL:SIZING:STYLE',
'OUTPUTCONTROL:TABLE:STYLE',
'PARAMETRIC:FILENAMESUFFIX',
'PARAMETRIC:LOGIC',
'PARAMETRIC:RUNCONTROL',
'PARAMETRIC:SETVALUEFORRUN',
'PIPINGSYSTEM:UNDERGROUND:DOMAIN',
'PLANTEQUIPMENTOPERATION:USERDEFINED',
'ROOFIRRIGATION',
'ROOMAIR:TEMPERATUREPATTERN:CONSTANTGRADIENT',
'ROOMAIR:TEMPERATUREPATTERN:NONDIMENSIONALHEIGHT',
'ROOMAIR:TEMPERATUREPATTERN:SURFACEMAPPING',
'ROOMAIR:TEMPERATUREPATTERN:TWOGRADIENT',
'ROOMAIR:TEMPERATUREPATTERN:USERDEFINED',
'ROOMAIRMODELTYPE',
'ROOMAIRSETTINGS:AIRFLOWNETWORK',
'ROOMAIRSETTINGS:CROSSVENTILATION',
'ROOMAIRSETTINGS:ONENODEDISPLACEMENTVENTILATION',
'ROOMAIRSETTINGS:THREENODEDISPLACEMENTVENTILATION',
'ROOMAIRSETTINGS:UNDERFLOORAIRDISTRIBUTIONEXTERIOR',
'ROOMAIRSETTINGS:UNDERFLOORAIRDISTRIBUTIONINTERIOR',
'RUNPERIODCONTROL:DAYLIGHTSAVINGTIME',
'RUNPERIODCONTROL:SPECIALDAYS',
'SETPOINTMANAGER:COLDEST',
'SETPOINTMANAGER:CONDENSERENTERINGRESET',
'SETPOINTMANAGER:CONDENSERENTERINGRESET:IDEAL',
'SETPOINTMANAGER:FOLLOWGROUNDTEMPERATURE',
'SETPOINTMANAGER:FOLLOWOUTDOORAIRTEMPERATURE',
'SETPOINTMANAGER:FOLLOWSYSTEMNODETEMPERATURE',
'SETPOINTMANAGER:MIXEDAIR',
'SETPOINTMANAGER:MULTIZONE:COOLING:AVERAGE',
'SETPOINTMANAGER:MULTIZONE:HEATING:AVERAGE',
'SETPOINTMANAGER:MULTIZONE:HUMIDITY:MAXIMUM',
'SETPOINTMANAGER:MULTIZONE:HUMIDITY:MINIMUM',
'SETPOINTMANAGER:MULTIZONE:MAXIMUMHUMIDITY:AVERAGE',
'SETPOINTMANAGER:MULTIZONE:MINIMUMHUMIDITY:AVERAGE',
'SETPOINTMANAGER:OUTDOORAIRPRETREAT',
'SETPOINTMANAGER:OUTDOORAIRRESET',
'SETPOINTMANAGER:RETURNAIRBYPASSFLOW',
'SETPOINTMANAGER:RETURNTEMPERATURE:CHILLEDWATER',
'SETPOINTMANAGER:RETURNTEMPERATURE:HOTWATER',
'SETPOINTMANAGER:SCHEDULED',
'SETPOINTMANAGER:SCHEDULED:DUALSETPOINT',
'SETPOINTMANAGER:SINGLEZONE:COOLING',
'SETPOINTMANAGER:SINGLEZONE:HEATING',
'SETPOINTMANAGER:SINGLEZONE:HUMIDITY:MAXIMUM',
'SETPOINTMANAGER:SINGLEZONE:HUMIDITY:MINIMUM',
'SETPOINTMANAGER:SINGLEZONE:ONESTAGECOOLING',
'SETPOINTMANAGER:SINGLEZONE:ONESTAGEHEATING',
'SETPOINTMANAGER:SINGLEZONE:REHEAT',
'SETPOINTMANAGER:WARMEST',
'SETPOINTMANAGER:WARMESTTEMPERATUREFLOW',
'SHADINGPROPERTY:REFLECTANCE',
'SHADOWCALCULATION',
'SIMULATION DATA',
'SIMULATIONCONTROL',
'SITE:GROUNDDOMAIN:BASEMENT',
'SITE:GROUNDDOMAIN:SLAB',
'SITE:GROUNDREFLECTANCE',
'SITE:GROUNDREFLECTANCE:SNOWMODIFIER',
'SITE:GROUNDTEMPERATURE:BUILDINGSURFACE',
'SITE:GROUNDTEMPERATURE:DEEP',
'SITE:GROUNDTEMPERATURE:FCFACTORMETHOD',
'SITE:GROUNDTEMPERATURE:SHALLOW',
'SITE:HEIGHTVARIATION',
'SITE:LOCATION',
'SITE:PRECIPITATION',
'SITE:SOLARANDVISIBLESPECTRUM',
'SITE:WATERMAINSTEMPERATURE',
'SITE:WEATHERSTATION',
'SIZING:PARAMETERS',
'SIZING:PLANT',
'SIZING:SYSTEM',
'SIZING:ZONE',
'SOLARCOLLECTOR:UNGLAZEDTRANSPIRED:MULTISYSTEM',
'STEAMEQUIPMENT',
'SURFACECONTAMINANTSOURCEANDSINK:GENERIC:BOUNDARYLAYERDIFFUSION',
'SURFACECONTAMINANTSOURCEANDSINK:GENERIC:DEPOSITIONVELOCITYSINK',
'SURFACECONTAMINANTSOURCEANDSINK:GENERIC:PRESSUREDRIVEN',
'SURFACECONTROL:MOVABLEINSULATION',
'SURFACECONVECTIONALGORITHM:INSIDE',
'SURFACECONVECTIONALGORITHM:INSIDE:ADAPTIVEMODELSELECTIONS',
'SURFACECONVECTIONALGORITHM:OUTSIDE',
'SURFACECONVECTIONALGORITHM:OUTSIDE:ADAPTIVEMODELSELECTIONS',
'SURFACEPROPERTIES:VAPORCOEFFICIENTS',
'SURFACEPROPERTY:CONVECTIONCOEFFICIENTS',
'SURFACEPROPERTY:CONVECTIONCOEFFICIENTS:MULTIPLESURFACE',
'SURFACEPROPERTY:EXPOSEDFOUNDATIONPERIMETER',
'SURFACEPROPERTY:EXTERIORNATURALVENTEDCAVITY',
'SURFACEPROPERTY:HEATTRANSFERALGORITHM',
'SURFACEPROPERTY:HEATTRANSFERALGORITHM:CONSTRUCTION',
'SURFACEPROPERTY:HEATTRANSFERALGORITHM:MULTIPLESURFACE',
'SURFACEPROPERTY:HEATTRANSFERALGORITHM:SURFACELIST',
'SURFACEPROPERTY:SOLARINCIDENTINSIDE',
'TIMESTEP',
'UTILITYCOST:CHARGE:BLOCK',
'UTILITYCOST:CHARGE:SIMPLE',
'UTILITYCOST:COMPUTATION',
'UTILITYCOST:QUALIFY',
'UTILITYCOST:RATCHET',
'UTILITYCOST:VARIABLE',
'VERSION',
'WATERHEATER:SIZING',
'WATERUSE:RAINCOLLECTOR',
'WATERUSE:WELL',
'WEATHERPROPERTY:SKYTEMPERATURE',
'WINDOWPROPERTY:AIRFLOWCONTROL',
'WINDOWPROPERTY:STORMWINDOW',
'ZONEAIRBALANCE:OUTDOORAIR',
'ZONEAIRCONTAMINANTBALANCE',
'ZONEAIRHEATBALANCEALGORITHM',
'ZONEAIRMASSFLOWCONSERVATION',
'ZONEBASEBOARD:OUTDOORTEMPERATURECONTROLLED',
'ZONECAPACITANCEMULTIPLIER:RESEARCHSPECIAL',
'ZONECONTAMINANTSOURCEANDSINK:CARBONDIOXIDE',
'ZONECONTAMINANTSOURCEANDSINK:GENERIC:CONSTANT',
'ZONECONTAMINANTSOURCEANDSINK:GENERIC:CUTOFFMODEL',
'ZONECONTAMINANTSOURCEANDSINK:GENERIC:DECAYSOURCE',
'ZONECONTAMINANTSOURCEANDSINK:GENERIC:DEPOSITIONRATESINK',
'ZONECONTROL:CONTAMINANTCONTROLLER',
'ZONECONTROL:THERMOSTAT:OPERATIVETEMPERATURE',
'ZONECONTROL:THERMOSTAT:TEMPERATUREANDHUMIDITY',
'ZONECONTROL:THERMOSTAT:THERMALCOMFORT',
'ZONECOOLTOWER:SHOWER',
'ZONECROSSMIXING',
'ZONEEARTHTUBE',
'ZONEGROUP',
'ZONEHVAC:EQUIPMENTCONNECTIONS',
'ZONEINFILTRATION:DESIGNFLOWRATE',
'ZONEINFILTRATION:EFFECTIVELEAKAGEAREA',
'ZONEINFILTRATION:FLOWCOEFFICIENT',
'ZONEMIXING',
'ZONEPROPERTY:USERVIEWFACTORS:BYSURFACENAME',
'ZONEREFRIGERATIONDOORMIXING',
'ZONETHERMALCHIMNEY']
def find_non_reference_classes(idd_path):
"""
Uses Eppy to parse the IDD and return the object classes that aren't
used as reference.
Args:
-----
* idd_path (str): path to the Energy+.idd
eg: /Applications/EnergyPlus-8-7-0/Energy+.idd
Returns:
--------
* not_reference_classes (list): a list of classnames that aren't
referenced, IN UPPER CASE, and sorted
Needs:
------
eppy
"""
from eppy.EPlusInterfaceFunctions import parse_idd
x = parse_idd.extractidddata(idd_path)
useful = x[2]
not_reference_classes = []
for i, idd_class in enumerate(useful):
is_reference = False
classname = idd_class[0]['idfobj']
if len(idd_class) > 1:
if 'reference' in idd_class[1].keys():
if len(idd_class[1]['reference']) > 0:
is_reference = True
if not is_reference:
not_reference_classes.append(classname)
return sorted([x.upper() for x in not_reference_classes])
def lint_and_tag_file(idf_path):
"""
This will open a file, replace all special characters in the object names
and output a new file. The linted object names will also be used to
create ctags-compatible statements, stored in a returned list
Args:
-----
* idf_path (str): a path to the IDF file
Returns:
--------
* tags (list): a list of ctags statements
Needs:
-------
import os
"""
tags = []
tag_s = "{tagname}\t{tagfile}\t/^{tagaddress}$"
path, ext = os.path.splitext(idf_path)
new_filename = "{}-out{}".format(path, ext)
# Read the content of the original IDF file
# Written this way instead of open(idf_path, 'r', encoding='latin-1')
# for Python 2/3 compat
with open(idf_path, 'rb') as content_file:
content = content_file.read().decode('latin-1')
# This dict will be used to replace in the entire file content
replacement_dict = {}
# To find the class and object names
objname_found = False
classname_found = False
# Loop on each line
for line in content.splitlines():
t = line.split('!')[0].strip()
if len(t) > 0:
if ';' in t:
classname_found = False
objname_found = False
else:
if not classname_found:
classname = t.split(',')[0].strip()
classname_found = True
elif (not objname_found and
(classname.upper() not in NOT_REFERENCE_CLASSES)):
objname = t.split(',')[0].strip()
objname_found = True
escaped_objname = (objname.replace(" ", "_")
.replace("(", "_")
.replace(")", "_")
.replace("[", "_")
.replace("]", "_")
.replace("{", "_")
.replace("}", "_")
.replace("/", "_")
.replace("\\", "_")
.replace("-", "_")
.replace(".", "_")
.replace(":", "_"))
if escaped_objname != "":
escaped_line = line.replace(objname, escaped_objname)
# Add to the replacement_dict if need be only
if objname != escaped_objname:
replacement_dict[objname] = escaped_objname
tag = tag_s.format(tagname=escaped_objname,
tagfile=new_filename,
tagaddress=escaped_line)
tags.append(tag)
# Replace in the entire file content
# We go from the most specific (longer) to the shorter
# To avoid having problems where if I replace "A" by "B"
# and then try to replace "AB" by "CC" it will not find "AB"
for k in sorted(replacement_dict, key=len, reverse=True):
content = content.replace(k, replacement_dict[k])
# Write the new file
with open(new_filename, 'wb') as write_file:
write_file.write(content.encode('latin-1'))
return tags
def tag_idfs(idf_path=None, recursive=True):
"""
This creates a ctags file for the IDF (as well as new IDF linted file(s))
Args:
-----
* idf_path (str, or None): if a path to an idf file is given, only
this file will be tagged. Otherwise will do a glob of all idfs
* recursive (boolean): Whether the glob needs to be recursive or not
"""
if idf_path is None:
if recursive is True:
# Python 2 doesn't support recursive...
if sys.version_info[0] < 3:
# Python 2 doesn't support recursive...
import fnmatch
idf_paths = []
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*.idf'):
idf_path = os.path.join(root, filename)
idf_paths.append(os.path.relpath(idf_path))
else:
idf_paths = gb.glob('**/*.idf', recursive=True)
else:
idf_paths = gb.glob('*.idf')
else:
if not os.path.splitext(idf_path)[1] == '.idf':
raise ValueError("If `idf_path` is specified, "
"it must be a `.idf` file")
if not os.path.isfile(idf_path):
raise IOError("{} doesn't exists".format(idf_path))
idf_paths = [idf_path]
tags = []
for idf_path in idf_paths:
print("Processing: {}".format(idf_path))
tags += lint_and_tag_file(idf_path)
# Write tags file
with open('tags', 'wb') as tag_file:
tag_file.write('\n'.join(sorted(tags)).encode('latin-1'))
print("Generated tags file: {}".format(os.getcwd()))
```
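
A short sketch of driving the module above from Python (the `idf-tags` CLI exercised in the tests below calls the same function); the file name and object name are hypothetical. Each tag statement follows the classic ctags pattern `tagname<TAB>tagfile<TAB>/^line$`, with the object name escaped identically in the tag and in the generated `-out.idf` file:

```python
from idftags.idf_tag import tag_idfs, lint_and_tag_file

# Tag every IDF below the current directory: writes <name>-out.idf files plus a 'tags' file
tag_idfs(recursive=True)

# Or lint and tag a single file and look at the raw tag statements it produces
tags = lint_and_tag_file('5ZoneAirCooled.idf')   # hypothetical file
# A resulting entry looks roughly like:
#   Main_Chiller\t5ZoneAirCooled-out.idf\t/^  Main_Chiller,  !- Name$
```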
#### File: idf-tags/tests/test_cli.py
```python
import sys
import os
import glob as gb
from subprocess import check_output, Popen, PIPE, STDOUT
import pytest
from idftags import __version__ as VERSION
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class TestHelp():
"""
Py.test class for the help
"""
def test_help(self):
"""
Py.test for -h or --help
"""
output = check_output(['idf-tags', '-h'])
assert 'Usage:' in output.decode('utf-8')
output = check_output(['idf-tags', '--help'])
assert 'Usage:' in output.decode('utf-8')
def test_recursive_and_path(self):
"""
Py.test to check that if both --recursive and a path are given it
shows the help
"""
# Cannot call check_output, it's going to crash because the return code
# isn't 0 in this case (it is - after all - an invalid call!)
output = Popen(['idf-tags', '-r', 'i.idf'],
stdout=PIPE, stderr=STDOUT).communicate()[0]
assert 'Usage:' in output.decode('utf-8')
output = Popen(['idf-tags', '--recursive', 'i.idf'],
stdout=PIPE, stderr=STDOUT).communicate()[0]
assert 'Usage:' in output.decode('utf-8')
class TestVersion():
"""
Py.test class for version
"""
def test_version_short(self):
"""
Py.test for -v
"""
output = check_output(['idf-tags', '-v'])
assert output.decode('utf-8').strip() == VERSION
def test_version_long(self):
"""
Py.test for --version
"""
output = check_output(['idf-tags', '--version'])
assert output.decode('utf-8').strip() == VERSION
class TestIdfTagsCLI():
"""
Py.test class to test that the arguments are understood correctly by the
CLI
"""
@pytest.fixture(autouse=True)
def cleanup_out_files(self):
"""
Fixture run around tests. Will change the current working dir
Will delete all 'xx-out.idf' files created to avoid multiplication
of files.
"""
curdir = os.getcwd()
os.chdir("{}/test_files".format(TEST_DIR))
yield
# This runs even if the test failed
        if sys.version_info[0] < 3:
            # Python 2's glob doesn't support recursive=True, so walk the tree manually
import fnmatch
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*out.idf'):
idf_path = os.path.join(root, filename)
os.remove(idf_path)
else:
for filepath in gb.iglob("**/*out.idf", recursive=True):
os.remove(filepath)
# Teardown
os.chdir(curdir)
def test_without_recursive(self):
"""
Py.test when recursive isn't used
"""
output = check_output(['idf-tags']).decode('utf-8')
lines = output.split('\n')
assert len(lines) == 4
def test_with_recursive(self):
"""
Py.test when recursive is used
"""
output = check_output(['idf-tags', '-r']).decode('utf-8')
lines = output.split('\n')
assert len(lines) == 5
def test_with_path(self):
"""
Py.test for a single file
"""
output = check_output(['idf-tags',
'WaterHeaterStandAlone.idf']).decode('utf-8')
lines = output.split('\n')
# There's an extra newline character line... user sees two
# Processing xxxx.idf and "Generated tag"
assert len(lines) == 3
``` |
{
"source": "jmarrec/IDFVersionUpdater2",
"score": 2
} |
#### File: IDFVersionUpdater2/IDFVersionUpdater/VersionUpdaterWindow.py
```python
import os
import subprocess
import wx
from EnergyPlusPath import EnergyPlusPath
from TransitionRunThread import TransitionRunThread
from International import translate as _, Languages, set_language
from Settings import Keys, load_settings, save_settings
__program_name__ = "IDFVersionUpdater (v2.0)"
_doing_restart = False
def doing_restart():
"""
    This function exposes the private global variable indicating whether the program is going through an automated restart
:return: True if the program should be restarted after reloaded settings, or False to just let the program be done
"""
return _doing_restart
class VersionUpdaterWindow(wx.Frame):
""" The main window, or wx.Frame, for the IDFVersionUpdater program.
This initializer function creates instance variables, sets up threading, and builds the GUI"""
def __init__(self):
# initialize base class
wx.Frame.__init__(self, None, title=__program_name__, size=(600, 183))
self.SetMinSize((400, 175))
# load the settings here very early; the tilde is cross platform thanks to Python
self.settings_file_name = os.path.join(os.path.expanduser("~"), ".idfversionupdater.json")
self.settings = load_settings(self.settings_file_name)
# initialize some class-level "constants"
self.box_spacing = 4
# reset the restart flag
global _doing_restart
_doing_restart = False
# initialize instance variables to be set later
self.btn_select_idf = None
self.btn_about = None
self.lbl_path = None
self.lbl_old_version = None
self.chk_create_inter_versions = None
self.btn_update_file = None
self.btn_open_run_dir = None
self.btn_cancel = None
self.btn_exit = None
self.status_bar = None
self.idf_version = None
self.running_transition_thread = None
# try to load the settings very early since it includes initialization
set_language(self.settings[Keys.language])
# connect signals for the GUI
self.Bind(wx.EVT_CLOSE, self.on_closing_form)
# build up the GUI itself
self.build_gui()
# update the list of E+ versions
self.ep_run_folder = EnergyPlusPath()
title = TransitionRunThread.get_ep_version(os.path.join(self.ep_run_folder.installation_path, 'EnergyPlus'))
self.SetTitle(__program_name__ + " -- " + title)
self.status_bar.SetStatusText(_("Program Initialized"))
# check the validity of the idf versions once at load time to initialize the action availability
self.on_update_for_new_file(None)
# GUI Worker Functions
def build_gui(self):
"""
This function manages the window construction, including position, title, and presentation
"""
self.status_bar = self.CreateStatusBar() # A StatusBar in the bottom of the window
# highest level layout control is the main panel
panel = wx.Panel(self, wx.ID_ANY)
# this is then broken into rows with one vertical sizer
top_sizer = wx.BoxSizer(wx.VERTICAL)
# each row then has their own horizontal sizers on which controls are placed
choose_about_sizer = wx.BoxSizer(wx.HORIZONTAL)
path_version_sizer = wx.BoxSizer(wx.HORIZONTAL)
checkbox_sizer = wx.BoxSizer(wx.HORIZONTAL)
update_audit_close_sizer = wx.BoxSizer(wx.HORIZONTAL)
# let's create a bunch of controls
self.btn_select_idf = wx.Button(panel, wx.ID_ANY, _('Choose File to Update...'))
self.btn_select_idf.Bind(wx.EVT_BUTTON, self.on_choose_idf)
self.btn_about = wx.Button(panel, wx.ID_ANY, _('About...'))
self.btn_about.Bind(wx.EVT_BUTTON, self.on_about)
self.lbl_path = wx.TextCtrl(panel, wx.ID_ANY, _('File Path'), style=wx.BORDER_NONE)
self.lbl_path.Bind(wx.EVT_TEXT, self.on_update_for_new_file)
if self.settings[Keys.last_idf] is not None:
self.lbl_path.SetValue(self.settings[Keys.last_idf])
self.lbl_old_version = wx.StaticText(panel, wx.ID_ANY, _('Old Version'))
self.chk_create_inter_versions = wx.CheckBox(panel, wx.ID_ANY, _('Keep Intermediate Versions of Files?'))
self.chk_create_inter_versions.SetValue(True)
self.btn_update_file = wx.Button(panel, wx.ID_ANY, _('Update File'))
self.btn_update_file.Bind(wx.EVT_BUTTON, self.on_update_idf)
self.btn_cancel = wx.Button(panel, wx.ID_ANY, _('Cancel Run'))
self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
self.btn_cancel.Disable()
self.btn_open_run_dir = wx.Button(panel, wx.ID_ANY, _('Open Run Directory'))
self.btn_open_run_dir.Bind(wx.EVT_BUTTON, self.on_open_run_dir)
self.btn_exit = wx.Button(panel, wx.ID_ANY, _('Close'))
self.btn_exit.Bind(wx.EVT_BUTTON, self.on_close)
# now let's add the controls to each sizer
choose_about_sizer.Add(self.btn_select_idf, 1, flag=wx.ALIGN_LEFT | wx.LEFT, border=self.box_spacing)
choose_about_sizer.Add(wx.StaticText(panel, -1, ''), 100, wx.EXPAND, border=self.box_spacing)
choose_about_sizer.Add(self.btn_about, 1, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=self.box_spacing)
path_version_sizer.Add(self.lbl_path, 4, flag=wx.ALIGN_LEFT | wx.LEFT, border=self.box_spacing)
path_version_sizer.Add(wx.StaticText(panel, -1, ''), 1, wx.EXPAND, border=self.box_spacing)
path_version_sizer.Add(self.lbl_old_version, 1, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=self.box_spacing)
checkbox_sizer.Add(self.chk_create_inter_versions, 1, flag=wx.ALIGN_LEFT | wx.LEFT, border=self.box_spacing)
update_audit_close_sizer.Add(self.btn_update_file, 1, flag=wx.ALIGN_LEFT | wx.LEFT, border=self.box_spacing)
update_audit_close_sizer.Add(self.btn_cancel, 1, flag=wx.ALIGN_LEFT | wx.LEFT, border=self.box_spacing)
update_audit_close_sizer.Add(wx.StaticText(panel, -1, ''), 7, wx.EXPAND, border=self.box_spacing)
update_audit_close_sizer.Add(self.btn_open_run_dir, 1, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=self.box_spacing)
update_audit_close_sizer.Add(self.btn_exit, 1, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=self.box_spacing)
# then we'll add all the horizontal sizers into the main vertical sizer
top_sizer.Add(choose_about_sizer, proportion=1, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(wx.StaticLine(panel, ), proportion=0, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(path_version_sizer, proportion=1, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(wx.StaticLine(panel, ), proportion=0, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(checkbox_sizer, proportion=1, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(wx.StaticLine(panel, ), proportion=0, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
top_sizer.Add(update_audit_close_sizer, proportion=1, flag=wx.EXPAND | wx.ALL, border=self.box_spacing)
# and now tell the panel we are using this topSizer
panel.SetSizer(top_sizer)
# also build out the menu bar
menu_bar = wx.MenuBar()
menu1 = wx.Menu()
# create the actual actionable items under the language menu
menu1.Append(Languages.English, "English", "Change language to English")
self.Bind(wx.EVT_MENU, self.on_switch_language, id=Languages.English)
menu1.Append(Languages.Spanish, "Spanish", "Change language to Spanish")
self.Bind(wx.EVT_MENU, self.on_switch_language, id=Languages.Spanish)
menu1.Append(Languages.French, "French", "Change language to French")
self.Bind(wx.EVT_MENU, self.on_switch_language, id=Languages.French)
menu1.AppendSeparator()
menu1.Append(106, "&Close\tCtrl+W", "Closes the window")
self.Bind(wx.EVT_MENU, self.on_close, id=106)
menu_bar.Append(menu1, "&File")
self.SetMenuBar(menu_bar) # Adding the MenuBar to the Frame content.
# finally show the window in the center of the (primary? current?) screen
self.Show(True)
self.CenterOnScreen()
def set_buttons_for_running(self, enabled):
"""
This function sets the state of different buttons on the main window while a background task is running
The list of controls to be enabled/disabled is hardcoded in an array in this function
:param enabled: True if the controls are to be enabled, for example when the process is complete, False to disable.
:return:
"""
buttons = [self.btn_update_file, self.btn_select_idf, self.btn_exit]
if enabled:
[x.Enable() for x in buttons]
else:
[x.Disable() for x in buttons]
buttons_to_invert = [self.btn_cancel]
if enabled:
[x.Disable() for x in buttons_to_invert]
else:
[x.Enable() for x in buttons_to_invert]
# Event Handlers
def on_switch_language(self, event):
"""
This function handles the request to change languages, where the language identifier is passed in with the event
:param event: The event information generated by the caller, for this function, the event ID should be set to
an item in the :py:class:`Languages <International.Languages>` enumeration class
"""
global _doing_restart
this_id = event.GetId()
language = this_id
self.settings[Keys.language] = language
message = wx.MessageDialog(
parent=self,
message=_(
"You must restart the app to make the language change take effect. Would you like to restart now?"),
caption=__program_name__,
style=wx.YES_NO | wx.CENTRE | wx.ICON_QUESTION)
resp = message.ShowModal()
if resp == wx.ID_YES:
_doing_restart = True
message.Destroy()
if doing_restart():
self.Close(False)
def on_update_for_new_file(self, event):
"""
This function handles the request to update for a new file, including updating program settings,
gui button state, and updating the file version label if the file exists
:param event: The event information generated by the caller, which is typically a text ctrl text change event
"""
if self.lbl_path is None or self.btn_update_file is None:
return
idf = self.lbl_path.GetValue()
self.settings[Keys.last_idf] = idf
if os.path.exists(idf):
self.on_msg(_("IDF File exists, ready to go"))
self.idf_version = self.get_idf_version(idf)
self.lbl_old_version.SetLabel("%s: %s" % (_('Old Version'), self.idf_version))
self.btn_update_file.Enable()
else:
self.on_msg(_("IDF File doesn't exist at path given; cannot transition"))
self.btn_update_file.Disable()
def on_open_run_dir(self, event):
"""
This function handles the request to open the current run directory in the default application (Finder...)
:param event: The event information generated by the caller, which in this case is a wx Button
"""
try:
cur_platform = EnergyPlusPath.get_platform()
open_cmd = ""
if cur_platform == "linux":
open_cmd = "xdg-open"
elif cur_platform == "mac":
open_cmd = "open"
elif cur_platform == "windows":
open_cmd = "explorer"
subprocess.Popen([open_cmd, self.ep_run_folder.transition_directory], shell=False)
except Exception:
message = wx.MessageDialog(
parent=self,
message=_("Could not open run directory"),
caption=__program_name__,
style=wx.OK | wx.CENTRE | wx.ICON_WARNING)
message.ShowModal()
message.Destroy()
def on_closing_form(self, event):
"""
This function handles the request to close the form, first trying to save program settings
:param event: The event information generated by the caller, which in this case is a wx Button
"""
try:
save_settings(self.settings, self.settings_file_name)
except Exception:
pass
self.Destroy()
def on_choose_idf(self, event):
"""
This function handles the request to choose a new idf, opening a dialog, and updating settings if applicable
:param event: The event information generated by the caller, which in this case is a wx Button
"""
cur_folder = ""
if self.settings[Keys.last_idf_folder] is not None:
cur_folder = self.settings[Keys.last_idf_folder]
cur_idf = ""
if self.settings[Keys.last_idf] is not None:
cur_idf = self.settings[Keys.last_idf]
open_file_dialog = wx.FileDialog(self, _("Open File for Transition"), cur_folder, cur_idf,
"EnergyPlus Input Files (*.idf;*.imf)|*.idf;*.imf",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
result = open_file_dialog.ShowModal()
if result == wx.ID_OK:
cur_idf = open_file_dialog.GetPath()
self.settings[Keys.last_idf] = cur_idf
self.settings[Keys.last_idf_folder] = os.path.dirname(cur_idf)
self.lbl_path.SetValue(cur_idf)
open_file_dialog.Destroy()
def on_about(self, event):
"""
This function handles the request to show the "About..." modal dialog window
:param event: The event information generated by the caller, which in this case is a wx Button
"""
message = wx.MessageDialog(parent=self,
message=_("ABOUT_DIALOG"),
caption=__program_name__,
style=wx.OK | wx.CENTRE | wx.ICON_INFORMATION)
message.ShowModal()
message.Destroy()
def on_update_idf(self, event):
"""
This function handles the request to run Transition itself, building up the list of transitions,
creating a new thread instance, prepping the gui, and running it
:param event: The event information generated by the caller, which in this case is a wx Button
"""
        if self.idf_version not in [tr.source_version for tr in self.ep_run_folder.transitions_available]:
            self.on_msg(_("Cannot find a matching transition tool for this idf version"))
            return
# we need to build up the list of transition steps to perform
transitions_to_run = []
for tr in self.ep_run_folder.transitions_available:
if tr.source_version < self.idf_version:
continue # skip this older version
transitions_to_run.append(tr)
self.running_transition_thread = TransitionRunThread(
transitions_to_run,
self.ep_run_folder.transition_directory,
self.settings[Keys.last_idf],
self.chk_create_inter_versions.GetValue(),
self.callback_on_msg,
self.callback_on_done
)
self.running_transition_thread.start()
self.set_buttons_for_running(enabled=False)
def on_cancel(self, event):
self.btn_cancel.Disable()
self.running_transition_thread.stop()
def on_close(self, event):
"""
This function handles the request to close the form, simply calling Close
Note this does not destroy the form, allowing the owning code to still access the form settings
:param event: The event information generated by the caller, which in this case is a wx Button
"""
self.Close(False)
# Callback functions and delegates to be called on MainLoop thread
def callback_on_msg(self, message):
wx.CallAfter(self.on_msg, message)
def on_msg(self, message):
self.status_bar.SetStatusText(message)
def callback_on_done(self, message):
wx.CallAfter(self.on_done, message)
def on_done(self, message):
self.status_bar.SetStatusText(message)
self.set_buttons_for_running(enabled=True)
# Utilities
@staticmethod
def get_idf_version(path_to_idf):
"""
This function returns the current version of a given input file.
The function uses a simplified parsing approach so it only works for valid syntax files, and provides no specialized error handling
:param path_to_idf: Absolute path to a EnergyPlus input file
:rtype: A floating point version number for the input file, for example 8.5 for an 8.5.0 input file
"""
# phase 1: read in lines of file
with open(path_to_idf, "r") as fo:
lines = fo.readlines()
        # phase 2: remove comments and blank lines
lines_a = []
for line in lines:
line_text = line.strip()
this_line = ""
if len(line_text) > 0:
exclamation = line_text.find("!")
if exclamation == -1:
this_line = line_text
elif exclamation == 0:
this_line = ""
elif exclamation > 0:
this_line = line_text[:exclamation]
if not this_line == "":
lines_a.append(this_line)
# phase 3: join entire array and re-split by semicolon
idf_data_joined = ''.join(lines_a)
idf_object_strings = idf_data_joined.split(";")
# phase 4: break each object into an array of object name and field values
for this_object in idf_object_strings:
tokens = this_object.split(',')
if tokens[0].upper() == "VERSION":
version_string = tokens[1]
version_string_tokens = version_string.split('.') # might be 2 or 3...
version_number = float("%s.%s" % (version_string_tokens[0], version_string_tokens[1]))
return version_number
```
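A hedged sketch of a wxPython entry point for the window above; the project's real launcher is not shown here, so the restart handling below is an assumption:
```python
# Hypothetical launcher sketch for VersionUpdaterWindow (not the project's real entry point).
import wx
from VersionUpdaterWindow import VersionUpdaterWindow, doing_restart

if __name__ == '__main__':
    app = wx.App(False)
    VersionUpdaterWindow()   # the frame shows itself in __init__ via self.Show(True)
    app.MainLoop()
    if doing_restart():
        # A language change was requested; a real launcher would re-create the window here.
        print("Restart requested after language change")
```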
#### File: IDFVersionUpdater2/test/test_VersionUpdaterWindow.py
```python
import os
import sys
import tempfile
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'IDFVersionUpdater'))
from VersionUpdaterWindow import VersionUpdaterWindow
class TestGetIDFVersion(unittest.TestCase):
def setUp(self):
self.idf_name = tempfile.mktemp()
def test_good_version_number(self):
with open(self.idf_name, 'w') as f:
f.write("Version,8.5.0;")
version = VersionUpdaterWindow.get_idf_version(self.idf_name)
self.assertEqual(version, 8.5)
def test_bad_version_number(self):
with open(self.idf_name, 'w') as f:
f.write("Version,x.y.z;")
with self.assertRaises(ValueError):
VersionUpdaterWindow.get_idf_version(self.idf_name)
def test_missing_version_number(self):
with open(self.idf_name, 'w') as f:
f.write("x,y;")
version = VersionUpdaterWindow.get_idf_version(self.idf_name)
self.assertIsNone(version)
``` |
{
"source": "jmarrec/kiva",
"score": 2
} |
#### File: kiva/docs/conf.py
```python
import sys
import os
import shlex
from recommonmark.parser import CommonMarkParser
from datetime import datetime
from subprocess import Popen, PIPE
def get_version():
"""
Returns project version as string from 'git describe' command.
"""
pipe = Popen('git describe --tags --always', stdout=PIPE, shell=True)
    version = pipe.stdout.read().decode('utf-8')
    if version:
        return version.rstrip().lstrip('v')
else:
return 'X.Y'
extensions = [
'sphinx.ext.mathjax',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Kiva'
copyright = u'2012-' + str(datetime.now().year) + u', Big Ladder Software'
author = u'<NAME>'
version = get_version()
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
language = 'en'
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'Kivadoc'
#html_split_index = True
#html_theme_options = {'collapsiblesidebar': True}
latex_elements = {}
latex_documents = [
(master_doc, 'Kiva.tex', u'Kiva Documentation',
u'Neal Kruis', 'manual'),
]
man_pages = [
(master_doc, 'kiva', u'Kiva Documentation',
[author], 1)
]
``` |
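For illustration, this is the clean-up `get_version` applies to the raw `git describe` output; the tag string below is hypothetical:
```python
# Illustrative only: mimic get_version()'s handling of `git describe` output.
raw = b'v0.4.1-12-g1a2b3c4\n'                  # hypothetical bytes read from the pipe
cleaned = raw.decode('utf-8').rstrip().lstrip('v')
print(cleaned)                                  # -> 0.4.1-12-g1a2b3c4
```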
{
"source": "jmarrero/fedora-coreos-config",
"score": 2
} |
#### File: fedora-coreos-config/ci/remove-graduated-overrides.py
```python
import os
import sys
import json
import yaml
import subprocess
import dnf
import hawkey
ARCHES = ['s390x', 'x86_64', 'ppc64le', 'aarch64']
OVERRIDES_HEADER = """
# This lockfile should be used to pin to a package version (`type: pin`) or to
# fast-track packages ahead of Bodhi (`type: fast-track`). Fast-tracked
# packages will automatically be removed once they are in the stable repos.
#
# IMPORTANT: YAML comments *will not* be preserved. All `pin` overrides *must*
# include a URL in the `metadata.reason` key. Overrides of type `fast-track`
# *should* include a Bodhi update URL in the `metadata.bodhi` key and a URL
# in the `metadata.reason` key, though it's acceptable to omit a `reason`
# for FCOS-specific packages (ignition, afterburn, etc.).
"""
def main():
treefile = get_treefile()
base = get_dnf_base(treefile)
setup_repos(base, treefile)
for fn in get_lockfiles():
update_lockfile(base, fn)
def get_treefile():
treefile = subprocess.check_output(['rpm-ostree', 'compose', 'tree',
'--print-only', 'manifest.yaml'])
return json.loads(treefile)
def get_dnf_base(treefile):
base = dnf.Base()
base.conf.reposdir = "."
base.conf.releasever = treefile['releasever']
base.read_all_repos()
return base
def setup_repos(base, treefile):
for repo in base.repos.values():
repo.disable()
print("Enabled repos:")
for repo in treefile['repos']:
base.repos[repo].enable()
print(f"- {repo}")
print("Downloading metadata")
base.fill_sack(load_system_repo=False)
def get_lockfiles():
lockfiles = ['manifest-lock.overrides.yaml']
# TODO: for now, we only support the archless variant; supporting
# arch-specific lockfiles will require making dnf fetch metadata not just
# for the basearch on which we're running
# lockfiles += [f'manifest-lock.overrides.{arch}.yaml' for arch in ARCHES]
return lockfiles
def update_lockfile(base, fn):
if not os.path.exists(fn):
return
with open(fn) as f:
        lockfile = yaml.safe_load(f)
if 'packages' not in lockfile:
return
new_packages = {}
for name, lock in lockfile['packages'].items():
if ('metadata' not in lock or
lock['metadata'].get('type') != "fast-track"):
new_packages[name] = lock
continue
if 'evra' in lock:
nevra = f"{name}-{lock['evra']}"
else:
# it applies to all arches, so we can just check our arch (see
# related TODO above)
nevra = f"{name}-{lock['evr']}.{base.conf.basearch}"
graduated = sack_has_nevra_greater_or_equal(base, nevra)
if not graduated:
new_packages[name] = lock
else:
print(f"{fn}: {nevra} has graduated")
if lockfile['packages'] != new_packages:
lockfile['packages'] = new_packages
with open(fn, 'w') as f:
f.write(OVERRIDES_HEADER.strip())
f.write('\n\n')
yaml.dump(lockfile, f)
else:
print(f"{fn}: no packages graduated")
def sack_has_nevra_greater_or_equal(base, nevra):
nevra = hawkey.split_nevra(nevra)
pkgs = base.sack.query().filterm(name=nevra.name,
arch=nevra.arch).latest().run()
if len(pkgs) == 0:
# Odd... the only way I can imagine this happen is if we fast-track a
# brand new package from Koji which hasn't hit the updates repo yet.
# Corner-case, but let's be nice.
print(f"couldn't find package {nevra.name}; assuming not graduated")
return False
nevra_latest = hawkey.split_nevra(str(pkgs[0]))
return nevra_latest >= nevra
if __name__ == "__main__":
sys.exit(main())
``` |
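A hedged sketch of the parsed overrides lockfile that `update_lockfile` walks; the package names, versions, and URLs below are placeholders, not real overrides:
```python
# Hypothetical example of the dict produced by yaml-loading an overrides lockfile.
# Entries of type `fast-track` are dropped once an equal-or-newer build reaches the
# stable repos; `pin` entries are always kept.
lockfile = {
    'packages': {
        'ignition': {
            'evra': '2.9.0-1.fc34.x86_64',          # placeholder NEVRA
            'metadata': {'type': 'fast-track',
                         'bodhi': 'https://bodhi.fedoraproject.org/updates/FEDORA-XXXX'},
        },
        'kernel': {
            'evr': '5.11.12-300.fc34',              # applies to every architecture
            'metadata': {'type': 'pin',
                         'reason': 'https://example.com/tracker-issue'},
        },
    },
}

for name, lock in lockfile['packages'].items():
    kind = lock.get('metadata', {}).get('type')
    action = 'checked for graduation' if kind == 'fast-track' else 'kept as-is'
    print('{}: {} -> {}'.format(name, kind, action))
```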
{
"source": "jmarrietar/MILpy",
"score": 3
} |
#### File: MILpy/Algorithms/BOW.py
```python
import sys,os
import numpy as np
from sklearn.mixture import GMM
from sklearn.linear_model import LogisticRegression
from MILpy.functions.MIL2SIL import MIL2SIL
class BOW(object):
def __init__(self):
self._logistic = None
self._gauss_mix_model = None
def fit(self,train_bags,train_labels,**kwargs):
"""
@param train_bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param train_labels : an array-like object of length n containing -1/+1 labels
@param k : Number of 'words'
@param covar_type : Type of covariance matrix (default = 'diag')
"""
k = kwargs['k']
covar_type = kwargs['covar_type']
n_iter = kwargs['n_iter']
X, Y = MIL2SIL(train_bags,train_labels)
self._gauss_mix_model= GMM(n_components=k,covariance_type=covar_type, init_params='wc', n_iter=n_iter)
self._gauss_mix_model.fit(X)
out_hist = self._gauss_mix_model.predict_proba(X)
#Logistic separate positive histograms from negative histograms
self._logistic = LogisticRegression()
self._logistic = self._logistic.fit(out_hist,Y)
def predict(self,test_bags):
"""
@param test_bags: a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@return : an array of length n containing real-valued label predictions
"""
n = len(test_bags)
bags_out_test=[]
for i in range (0,n):
sil_bag, _= MIL2SIL(test_bags[i],[0])
out_test = self._gauss_mix_model.predict_proba(sil_bag)
out_test = np.mean(out_test,axis=0)
bags_out_test.append(out_test.reshape(1,len(out_test)))
bags_out_test = np.vstack(bags_out_test)
out_predicted = self._logistic.predict(bags_out_test)
return out_predicted
```
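A hedged usage sketch for `BOW` with small synthetic bags; the kwargs values (`k`, `covar_type`, `n_iter`) are illustrative, and bags are lists of m-by-k NumPy arrays as described in the docstrings:
```python
# Hedged usage sketch with synthetic data; all parameter values are illustrative.
import numpy as np
from MILpy.Algorithms.BOW import BOW

rng = np.random.RandomState(0)
train_bags = [rng.rand(rng.randint(3, 8), 5) for _ in range(20)]   # 20 bags, 5 features each
train_labels = np.array([i % 2 for i in range(20)])                # alternating bag labels
test_bags = [rng.rand(4, 5) for _ in range(5)]

bow = BOW()
bow.fit(train_bags, train_labels, k=3, covar_type='diag', n_iter=20)
print(bow.predict(test_bags))
```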
#### File: MILpy/Algorithms/CKNN.py
```python
import numpy as np
import scipy.spatial.distance as dist
class CKNN(object):
"""
Citation-KNN
"""
def __init__(self):
self._bags = None
self._bag_predictions = None
self._labels = None
self._full_bags = None
self._DM = None
def fit(self, train_bags, train_labels, **kwargs):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = train_bags
self._labels = train_labels
self._R = kwargs['references']
self._C = kwargs['citers']
def predict(self, Testbags):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@return : an array of length n containing real-valued label predictions
@R : References
@C : Citers
"""
        # Join the training and testing bags
train_bags = self._bags
full_bags = self._bags+Testbags
pred_labels = np.array([])
self._DM = self.DistanceMatrix(full_bags)
for num in range(len(self._bags),len(full_bags) ):
number = num
REFERENCES = self._DM[number,0:self._R]
CiteMatrix =self._DM[:,0:self._C]
CITERS,j = np.where(CiteMatrix == number)
LabelsTrainCiters = self._labels[CITERS[CITERS<len(train_bags)]]
LabelsTrainRef = self._labels[REFERENCES[REFERENCES<len(train_bags)]]
Rp = np.count_nonzero(LabelsTrainRef == 1)
Rn = np.count_nonzero(LabelsTrainRef == 0)
Cp = np.count_nonzero(LabelsTrainCiters == 1)
Cn = np.count_nonzero(LabelsTrainCiters == 0)
if Rp+Cp> Rn+Cn:
label_out = 1
else:
label_out = 0
pred_labels = np.append(pred_labels,label_out)
return pred_labels
    # Bag distances:
    # compute the distance from every bag to all of the other bags.
def DistanceMatrix (self,bags):
BagDistances ={}
count=0
        # Loop over every bag
for bag in bags:
            # Compute the Hausdorff distance from every bag to this bag
for i in range(0, len(bags)):
BagDistances[i] = _min_hau_bag(bags[i],bag)
            references_bag = sorted(BagDistances.items(), key=lambda x: x[1])  # sort the reference bags of the selected bag by distance
REF_Bag_p = []
for j in range(0, len(references_bag)):
REF_Bag_p.append(references_bag[j][0])
if count==0:
DistanceMatrix = np.matrix(REF_Bag_p)
else:
DistanceMatrix = np.vstack([DistanceMatrix, REF_Bag_p])
count=count+1
return DistanceMatrix
def _hau_bag(X,Y):
"""
@param X : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param Y : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@return : Hausdorff_distance
"""
Hausdorff_distance = max(max((min([list(dist.euclidean(x, y) for y in Y) for x in X]))),
max((min([list(dist.euclidean(x, y) for x in X) for y in Y]))))
return Hausdorff_distance
def _min_hau_bag(X,Y):
"""
@param X : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param Y : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@return : Hausdorff_distance
"""
Hausdorff_distance = max(min((min([list(dist.euclidean(x, y) for y in Y) for x in X]))),
min((min([list(dist.euclidean(x, y) for x in X) for y in Y]))))
return Hausdorff_distance
```
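A hedged usage sketch of Citation-KNN on synthetic bags; the `references` and `citers` values are illustrative:
```python
# Hedged usage sketch; bag contents and the R/C parameters are illustrative only.
import numpy as np
from MILpy.Algorithms.CKNN import CKNN

rng = np.random.RandomState(1)
train_bags = [rng.rand(rng.randint(3, 8), 4) for _ in range(16)]
train_labels = np.array([i % 2 for i in range(16)])   # 0/1 bag labels
test_bags = [rng.rand(5, 4) for _ in range(4)]

cknn = CKNN()
cknn.fit(train_bags, train_labels, references=3, citers=5)
print(cknn.predict(test_bags))
```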
#### File: MILpy/Algorithms/maxDD.py
```python
from MILpy.functions.bagprob import bagprob
from MILpy.functions.maxdd2 import maxdd
import numpy as np
from numpy import inf
from sklearn.linear_model import LogisticRegression
class maxDD(object):
def __init__(self):
self._spoints = None
self._epochs = None
self._frac = None
self._tol = None
self._maxConcept = None
self._end = None
self._model = None
def fit(self,train_bags,train_labels,spoints = 10,epochs = np.array([4,4]),frac = 1,tol=[1e-5,1e-5,1e-7,1e-7],**kwargs):
"""
@param train_bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param train_labels : an array-like object of length n containing -1/+1 labels
"""
self._spoints = spoints
self._epochs = epochs
self._frac = frac
self._tol = tol
index=np.array(np.where(train_labels == 1))
index=index.transpose()[0]
        bagI = train_labels  # bag labels
pbags=[] #positive Bags
for i in range(0,len(index)):
indice=index[i]
pbags.append(train_bags[indice])
_,dim = pbags[0].shape
#Missing condition If Spoints Empty choose all.
#PENDING: ADD MORE CONDITIONS IF IT FAILS
tmp = np.vstack(pbags)
I = np.random.permutation(len(tmp))
#Missing Spoints conditionals
spoints = tmp[I[0:spoints]]
#Missing scales conditionals
scales = 0.1*np.ones(dim)
epochs = epochs*dim
# begin diverse density maximization
self._maxConcept,concepts = maxdd(spoints,scales,train_bags,bagI,epochs,tol)
#Invent a threshold...:
self._end=len(self._maxConcept[0])
n = len(train_bags)
out = np.zeros(n)
for i in range (0,n):
out[i], _ = bagprob(train_bags[i],1,self._maxConcept[0][0:dim],self._maxConcept[0][dim:self._end])
out=out.reshape(len(out),1)
train_labels=train_labels.reshape(len(train_labels),1)
train_labels=np.ravel(train_labels)
model = LogisticRegression()
self._model = model.fit(out,train_labels)
def predict(self,test_bags):
"""
@param test_bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
"""
pbagsT=[]
for i in range(0,len(test_bags)):
pbagsT.append(test_bags[i])
_,dimT = pbagsT[0].shape
nT = len(pbagsT)
outT = np.zeros(nT)
for i in range (0,nT):
# check if any objects fall inside the bounds
outT[i], _ = bagprob(pbagsT[i],1,self._maxConcept[0][0:dimT],self._maxConcept[0][dimT:self._end])
outT=outT.reshape(len(outT),1)
predicted = self._model.predict(outT)
return predicted, outT
```
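A hedged usage sketch of `maxDD` with synthetic bags, relying on the default optimisation settings of `fit`:
```python
# Hedged usage sketch; the diverse-density optimisation uses the defaults from fit().
import numpy as np
from MILpy.Algorithms.maxDD import maxDD

rng = np.random.RandomState(2)
train_bags = [rng.rand(rng.randint(3, 8), 3) for _ in range(12)]
train_labels = np.array([i % 2 for i in range(12)])   # half the bags are positive
test_bags = [rng.rand(4, 3) for _ in range(3)]

dd = maxDD()
dd.fit(train_bags, train_labels)
predicted, scores = dd.predict(test_bags)
print(predicted, scores)
```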
#### File: MILpy/Algorithms/simpleMIL.py
```python
import numpy as np
from sklearn import svm
class simpleMIL(object):
def __init__(self):
self._model = None
self._type = None
def fit(self,train_bags,train_labels,**kwargs):
"""
@param train_bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param train_labels : an array-like object of length n containing -1/+1 labels
"""
self._type = kwargs['type']
if self._type == 'average':
bag_mean = np.asarray([np.mean(bag, axis=0) for bag in train_bags])
bag_modified = bag_mean
elif self._type == 'extreme':
bag_max = np.asarray([np.amax(bag,axis=0) for bag in train_bags])
bag_min = np.asarray([np.amin(bag,axis=0) for bag in train_bags])
bag_extreme = np.concatenate((bag_max,bag_min),axis=1)
bag_modified = bag_extreme
elif self._type == 'max':
bag_max = np.asarray([np.amax(bag,axis=0) for bag in train_bags])
bag_modified = bag_max
elif self._type == 'min':
bag_min = np.asarray([np.amin(bag,axis=0) for bag in train_bags])
bag_modified = bag_min
else:
            print('Unknown bag representation type: %s' % self._type)
self._model = svm.SVC()
self._model.fit(bag_modified, train_labels)
def predict(self,test_bags):
"""
@param test_bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
"""
bag_modified_test = None
if self._type == 'average':
bag_mean_test=np.asarray([np.mean(bag, axis=0) for bag in test_bags])
bag_modified_test = bag_mean_test
elif self._type == 'extreme':
bag_max_test = np.asarray([np.amax(bag,axis=0) for bag in test_bags])
bag_min_test = np.asarray([np.amin(bag,axis=0) for bag in test_bags])
bag_extreme_test = np.concatenate((bag_max_test,bag_min_test),axis=1)
bag_modified_test = bag_extreme_test
elif self._type == 'max':
bag_max_test = np.asarray([np.amax(bag,axis=0) for bag in test_bags])
bag_modified_test = bag_max_test
elif self._type == 'min':
bag_min_test = np.asarray([np.amin(bag,axis=0) for bag in test_bags])
bag_modified_test = bag_min_test
else:
            print('Unknown bag representation type: %s' % self._type)
predictions = self._model.predict(bag_modified_test)
return predictions
```
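A hedged usage sketch of `simpleMIL`; `type` picks how each bag is collapsed to a single feature vector before the SVM is fit:
```python
# Hedged usage sketch; 'average' is one of the supported bag representations.
import numpy as np
from MILpy.Algorithms.simpleMIL import simpleMIL

rng = np.random.RandomState(3)
train_bags = [rng.rand(rng.randint(3, 8), 6) for _ in range(20)]
train_labels = np.array([i % 2 for i in range(20)])
test_bags = [rng.rand(5, 6) for _ in range(5)]

smil = simpleMIL()
smil.fit(train_bags, train_labels, type='average')   # also: 'extreme', 'max', 'min'
print(smil.predict(test_bags))
```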
#### File: MILpy/functions/traindecstump.py
```python
import sys
import numpy as np
def traindecstump(X,w):
# [H,BESTERR] = TRAINDECSSTUMP(X,W)
#
# INPUT
# X Dataset
# W Weight per object
# DESCRIPTION
# Train a decision stump on dataset X. Each object in X is weighted by a
# weight W. Objects from the positive class have a positive weight, and
# otherwise the weights should be negative.
#
# The result is returned in vector H:
# H(1) the feature to threshold
# H(2) the threshold set on that feature
# H(3) the sign (+: right side is positive class, -: neg. side)
# Also the minimum error is returned in BESTERR.
#
n,dim = X.shape
sumneg = w[w< 0].sum()
sumpos = w[w> 0].sum()
besterr = float('Inf')
bestfeat = 0
bestthr = 0
bestsgn = 0
    for i in range(0, dim):  # consider every feature as a candidate split dimension
# find the best threshold for feature i
# assume that the positive class is on the right of the decision
# threshold:
sx= np.sort(X[:,i],axis=0)
J=np.argsort(X[:,i],axis=0)
z = np.cumsum(w[J])
err1 = -sumneg + z
minerr=min(err1)
I=np.argmin(err1)
if (minerr<besterr):
besterr = minerr
bestfeat = i
if (I==n-1):
bestthr = sx[I]+10*sys.float_info.epsilon
else:
bestthr = (sx[I]+sx[I+1])/2 + sys.float_info.epsilon
bestsgn = 1
#Now assume that the positive class is on the left of the decision
#threshold:
err2 = sumpos - z
minerr=min(err2)
I=np.argmin(err2)
if (minerr<besterr):
besterr = minerr
            bestfeat = i
if (I==n-1):
bestthr = sx[I]+10*sys.float_info.epsilon
else:
bestthr = (sx[I]+sx[I+1])/2 + sys.float_info.epsilon
bestsgn = -1
return {'bestfeat':bestfeat, 'bestthr':float(bestthr),'bestsgn':bestsgn,'besterr':besterr}
``` |
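A hedged usage sketch of `traindecstump` on two synthetic blobs, using the signed-weight convention described in the comment block (positive weights for the positive class, negative for the rest):
```python
# Hedged usage sketch; the blobs and the uniform 1/n weights are illustrative.
import numpy as np
from MILpy.functions.traindecstump import traindecstump

rng = np.random.RandomState(4)
X = np.vstack([rng.randn(10, 3) + 2.0,    # positive objects
               rng.randn(10, 3) - 2.0])   # negative objects
w = np.array([1.0 / 20] * 10 + [-1.0 / 20] * 10)

stump = traindecstump(X, w)
print(stump['bestfeat'], stump['bestthr'], stump['bestsgn'], stump['besterr'])
```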
{
"source": "jmarrietar/objax",
"score": 2
} |
#### File: objax/tests/jit.py
```python
import unittest
import jax.numpy as jn
from jax.core import ConcretizationTypeError
import objax
from objax.typing import JaxArray
class LinearArgs(objax.nn.Linear):
def __call__(self, x: JaxArray, some_args: float) -> JaxArray:
"""Returns the results of applying the linear transformation to input x."""
y = jn.dot(x, self.w.value) * some_args
if self.b:
y += self.b.value
return y
class LinearTrain(objax.nn.Linear):
def __call__(self, x: JaxArray, training: bool) -> JaxArray:
"""Returns the results of applying the linear transformation to input x."""
y = jn.dot(x, self.w.value)
if training:
y = -y
if self.b:
y += self.b.value
return y
class TestJit(unittest.TestCase):
def test_on_linear(self):
k = objax.nn.Linear(3, 3)
kj = objax.Jit(k)
x = objax.random.normal((64, 3))
y1 = kj(x)
k.w.assign(k.w.value + 1)
y2 = kj(x)
k.w.assign(k.w.value - 1)
y3 = kj(x)
self.assertAlmostEqual(((y1 - y3) ** 2).sum(), 0)
self.assertNotEqual(((y1 - y2) ** 2).sum(), 0)
def test_double_jit(self):
k = objax.nn.Linear(3, 3)
kj = objax.Jit(objax.Jit(k))
x = objax.random.normal((64, 3))
y1 = kj(x)
k.w.assign(k.w.value + 1)
y2 = kj(x)
k.w.assign(k.w.value - 1)
y3 = kj(x)
self.assertAlmostEqual(((y1 - y3) ** 2).sum(), 0)
self.assertNotEqual(((y1 - y2) ** 2).sum(), 0)
def test_jit_kwargs(self):
x = objax.random.normal((64, 3))
kj = objax.Jit(LinearArgs(3, 3))
y1 = kj(x, 1)
y2 = kj(x, some_args=1)
y3 = kj(x, some_args=2)
self.assertEqual(y1.tolist(), y2.tolist())
self.assertNotEqual(y1.tolist(), y3.tolist())
kj = objax.Jit(LinearTrain(3, 3))
with self.assertRaises(ConcretizationTypeError):
kj(x, training=True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmarrietar/tensorflow-recorder",
"score": 2
} |
#### File: tensorflow-recorder/tfrecorder/client_test.py
```python
import os
from typing import List
import csv
import tempfile
import unittest
import mock
import pandas as pd
from tfrecorder import client
from tfrecorder import constants
from tfrecorder import test_utils
class ClientTest(unittest.TestCase):
"""Misc tests for `client` module."""
def setUp(self):
self.test_df = test_utils.get_test_df()
self.test_region = 'us-central1'
self.test_project = 'foo'
@mock.patch('tfrecorder.client.beam_pipeline')
def test_create_tfrecords_direct_runner(self, mock_beam):
"""Tests `create_tfrecords` Direct case."""
mock_beam.build_pipeline().run().wait_until_finished.return_value = {
'rows':6}
r = client.create_tfrecords(
self.test_df,
runner='DirectRunner',
output_dir='/tmp/direct_runner')
self.assertTrue('metrics' in r)
@mock.patch('tfrecorder.client.beam_pipeline')
def test_create_tfrecords_dataflow_runner(self, mock_beam):
"""Tests `create_tfrecords` Dataflow case."""
mock_beam.build_pipeline().run().job_id.return_value = 'foo_id'
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
outdir = '/tmp/dataflow_runner'
expected = {
'job_id': 'foo_id',
'dataflow_url': 'https://console.cloud.google.com/dataflow/jobs/' +
'us-central1/foo_id?project=foo'}
os.makedirs(outdir, exist_ok=True)
r = client.create_tfrecords(
df2,
runner='DataflowRunner',
output_dir=outdir,
region=self.test_region,
project=self.test_project)
self.assertEqual(r, expected)
# pylint: disable=protected-access
class InputValidationTest(unittest.TestCase):
"""'Tests for validation input data."""
def setUp(self):
self.test_df = test_utils.get_test_df()
self.test_region = 'us-central1'
self.test_project = 'foo'
def test_valid_dataframe(self):
"""Tests valid DataFrame input."""
self.assertIsNone(
client._validate_data(
self.test_df))
def test_missing_image(self):
"""Tests missing image column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('image_uri', inplace=True, axis=1)
client._validate_data(df2)
def test_missing_label(self):
"""Tests missing label column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('label', inplace=True, axis=1)
client._validate_data(df2)
def test_missing_split(self):
"""Tests missing split column."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
df2.drop('split', inplace=True, axis=1)
client._validate_data(df2)
def test_columns_out_of_order(self):
"""Tests validating wrong column order."""
with self.assertRaises(AttributeError):
df2 = self.test_df.copy()
cols = ['image_uri', 'split', 'label']
df2 = df2[cols]
client._validate_data(df2)
def test_valid_runner(self):
"""Tests valid runner."""
self.assertIsNone(client._validate_runner(
self.test_df,
runner='DirectRunner',
project=self.test_project,
region=self.test_region))
def test_invalid_runner(self):
"""Tests invalid runner."""
with self.assertRaises(AttributeError):
client._validate_runner(
self.test_df,
runner='FooRunner',
project=self.test_project,
region=self.test_region)
def test_local_path_with_dataflow_runner(self):
"""Tests DataflowRunner conflict with local path."""
with self.assertRaises(AttributeError):
client._validate_runner(
                self.test_df,
runner='DataflowRunner',
project=self.test_project,
region=self.test_region)
def test_gcs_path_with_dataflow_runner(self):
"""Tests DataflowRunner with GCS path."""
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
self.assertIsNone(
client._validate_runner(
df2,
runner='DataflowRunner',
project=self.test_project,
region=self.test_region))
def test_gcs_path_with_dataflow_runner_missing_param(self):
"""Tests DataflowRunner with missing required parameter."""
df2 = self.test_df.copy()
df2[constants.IMAGE_URI_KEY] = 'gs://' + df2[constants.IMAGE_URI_KEY]
for p, r in [
(None, self.test_region), (self.test_project, None), (None, None)]:
with self.assertRaises(AttributeError) as context:
client._validate_runner(
df2,
runner='DataflowRunner',
project=p,
region=r)
self.assertTrue('DataflowRunner requires valid `project` and `region`'
in repr(context.exception))
def _make_csv_tempfile(data: List[List[str]]) -> tempfile.NamedTemporaryFile:
"""Returns `NamedTemporaryFile` representing an image CSV."""
f = tempfile.NamedTemporaryFile(mode='w+t', suffix='.csv')
writer = csv.writer(f, delimiter=',')
for row in data:
writer.writerow(row)
f.seek(0)
return f
def get_sample_image_csv_data() -> List[List[str]]:
"""Returns sample CSV data in Image CSV format."""
data = test_utils.get_test_data()
header = list(data.keys())
content = [list(row) for row in zip(*data.values())]
return [header] + content
class ReadCSVTest(unittest.TestCase):
"""Tests `read_csv`."""
def setUp(self):
data = get_sample_image_csv_data()
self.header = data.pop(0)
self.sample_data = data
def test_valid_csv_no_header_no_names_specified(self):
"""Tests a valid CSV without a header and no header names given."""
f = _make_csv_tempfile(self.sample_data)
actual = client.read_csv(f.name, header=None)
self.assertEqual(list(actual.columns), constants.IMAGE_CSV_COLUMNS)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_no_header_names_specified(self):
"""Tests valid CSV without a header, but header names are given."""
f = _make_csv_tempfile(self.sample_data)
actual = client.read_csv(f.name, header=None, names=self.header)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_with_header_no_names_specified(self):
"""Tests valid CSV with header, and no header names given (inferred)."""
f = _make_csv_tempfile([self.header] + self.sample_data)
actual = client.read_csv(f.name)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
def test_valid_csv_with_header_names_specified(self):
"""Tests valid CSV with header, and header names given (override)."""
f = _make_csv_tempfile([self.header] + self.sample_data)
actual = client.read_csv(f.name, names=self.header, header=0)
self.assertEqual(list(actual.columns), self.header)
self.assertEqual(actual.values.tolist(), self.sample_data)
class ToDataFrameTest(unittest.TestCase):
"""Tests `to_dataframe`."""
def setUp(self) -> None:
sample_data = get_sample_image_csv_data()
columns = sample_data.pop(0)
self.input_df = pd.DataFrame(sample_data, columns=columns)
@mock.patch.object(client, 'read_csv', autospec=True)
def test_input_csv(self, read_csv):
"""Tests valid input CSV file."""
expected = self.input_df
read_csv.return_value = expected
f = _make_csv_tempfile(get_sample_image_csv_data())
actual = client.to_dataframe(f.name)
pd.testing.assert_frame_equal(actual, expected)
def test_input_dataframe_no_names_specified(self):
"""Tests valid input dataframe with no header names specified."""
actual = client.to_dataframe(self.input_df)
pd.testing.assert_frame_equal(actual, self.input_df)
def test_input_dataframe_with_header(self):
"""Tests valid input dataframe with header specified."""
names = list(self.input_df.columns[0:-1])
actual = client.to_dataframe(self.input_df, names=names)
pd.testing.assert_frame_equal(actual, self.input_df[names])
def test_error_invalid_inputs(self):
"""Tests error handling with different invalid inputs."""
inputs = [0, 'not_a_csv_file', list(), dict()]
for input_data in inputs:
with self.assertRaises(ValueError):
client.to_dataframe(input_data)
if __name__ == '__main__':
unittest.main()
```
#### File: tensorflow-recorder/tfrecorder/test_utils.py
```python
from typing import Any, Dict, List
import os
import numpy as np
from PIL import Image
from apache_beam.testing import test_pipeline
import pandas as pd
TEST_DIR = 'tfrecorder/test_data'
def get_test_df():
"""Gets a test dataframe that works with the data in test_data/."""
return pd.read_csv(os.path.join(TEST_DIR, 'data.csv'))
def get_test_data() -> Dict[str, List[Any]]:
"""Returns test data in columnar format."""
return get_test_df().to_dict(orient='list')
def get_test_pipeline():
"""Gets a test pipeline."""
return test_pipeline.TestPipeline(runner='DirectRunner')
def make_random_image(height, width, channels):
"""Returns a random Numpy image."""
return Image.fromarray(
(np.random.random((height, width, channels)) * 255).astype(np.uint8))
``` |
{
"source": "jmarshall9120/django-oscar",
"score": 2
} |
#### File: oscar/utils/srcsets.py
```python
from django.conf import settings
from oscar.core.thumbnails import get_thumbnailer
default_SRCSETS = {
'fullsizes': {
'fullscreen': 1080,
'tablet': 780,
'mobile_large': 520,
'moble_small': 280,
},
'thumbnails': {
'large': 100,
'small': 50,
}
}
get_settings = lambda: getattr(settings, 'OSCAR_SRCSETS', default_SRCSETS)
# tested
def _get_srcset_sizes(srcsets=None, image_type=None):
"""Get sizes by any of the level 1 keys in the settings.SRCSETS dict."""
srcsets = srcsets or get_settings()
return ((image_type, k, v) for k,v in srcsets.get(image_type,{}).items()) if image_type \
else ((k_2, k_1, v_1) for k_2, v_2 in srcsets.items() for k_1, v_1 in v_2.items())
#tested
def get_srcset_image(source, width, image_type=None, image_processor=None, do_upscale=False,
settings=settings, **options):
"""Build and return the source set image objects."""
# have to wrap sizes so they pass to thumbnailer as 'widthxheight'.
# this is probably the worst part of the thumbnailers API.
calc_img_dim = lambda source, width: f"{width}x{int((width / source.width) * source.height)}"
# prevent upscaling by default. Filter sizes out that are greater than the
# uploaded source image.
if not do_upscale and width >= source.width:
return
# give the options of passing in a custom image processor.
image_processor = image_processor or get_thumbnailer().generate_thumbnail
return image_processor(source, **dict({'size':calc_img_dim(source, width)},**options))
def get_srcset(source, image_type=None, image_processor=None, do_upscale=False,
settings=settings, **options):
gen_sizes = _get_srcset_sizes(get_settings(), image_type=image_type)
gen_create_image = (
(image_type, size, width, get_srcset_image(
source, width,
image_type=image_type,
image_processor=image_processor,
do_upscale=do_upscale,
settings=settings,
**options
)) for image_type, size, width in gen_sizes)
return (
SrcsetImage(image, size, width, image_type)
for image_type, size, width, image in gen_create_image
if image is not None
)
class SrcsetImage():
def __init__(self, image, size, width, image_type):
self.image = image
self.size = size
self.width = width
self.image_type = image_type
return
def __str__(self):
        return f'{self.image.url} {self.width}w'
class SrcsetCollection():
def __init__(self, instance, srcset_images):
self.instance = instance
self._srcset = srcset_images
return
def __getattr__(self, name):
rtn = next((si for si in self._srcset if si.size==name), None) \
or [si for si in self._srcset if si.image_type==name]
if rtn:
return rtn
raise AttributeError()
def __getitem__(self,name):
return next((si for si in self._srcset if si.width==name), None) \
or self.__getattr__(name)
def __str__(self):
return str(self._srcset)
def __len__(self):
return len(self._srcset)
class SrcSetDescriptor:
def __init__(self, source_field, image_type=None, image_processor=None, do_upscale=False,
settings=settings, **options):
self.source_field = source_field
self.image_type = image_type
self.image_processor = image_processor or get_thumbnailer().generate_thumbnail
self.do_upscale=do_upscale
self.settings = settings or get_settings()
self.options = options
return
def __get__(self, instance, cls=None):
srcset = get_srcset(
getattr(instance, self.source_field),
image_type=self.image_type,
image_processor=self.image_processor,
do_upscale=self.do_upscale,
settings=self.settings,
**self.options
)
return SrcsetCollection(instance, list(srcset))
``` |
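A hypothetical sketch of attaching `SrcSetDescriptor` to a model with an `ImageField`; the model and field names are assumptions, and the access patterns rely on the `SrcsetCollection.__getattr__`/`__getitem__` behaviour shown above:
```python
# Hypothetical model wiring; ProductImage and its `original` field are assumptions.
from django.db import models
from oscar.utils.srcsets import SrcSetDescriptor

class ProductImage(models.Model):
    original = models.ImageField(upload_to='images/products/')

    # Lazily builds the full-size renditions from `original` on attribute access.
    srcset = SrcSetDescriptor('original', image_type='fullsizes')

# Usage, assuming `img` is a saved ProductImage instance:
#   img.srcset.fullscreen   -> the SrcsetImage whose size key is 'fullscreen'
#   img.srcset[520]         -> the rendition generated at width 520
#   len(img.srcset)         -> number of renditions actually generated (no upscaling)
```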
{
"source": "jmarshall/pysam",
"score": 2
} |
#### File: pysam/tests/tabixproxies_test.py
```python
import unittest
import pysam
import os
import sys
import re
import copy
import gzip
from TestUtils import load_and_convert, make_data_files, TABIX_DATADIR
def setUpModule():
make_data_files(TABIX_DATADIR)
class TestParser(unittest.TestCase):
filename = os.path.join(TABIX_DATADIR, "example.gtf.gz")
def setUp(self):
self.tabix = pysam.TabixFile(self.filename)
self.compare = load_and_convert(self.filename)
def tearDown(self):
self.tabix.close()
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
c = self.compare[x]
self.assertEqual(c, list(r))
self.assertEqual(len(c), len(r))
# test indexing
for y in range(0, len(r)):
self.assertEqual(c[y], r[y])
# test slicing access
for y in range(0, len(r) - 1):
for cc in range(y + 1, len(r)):
self.assertEqual(c[y:cc],
r[y:cc])
self.assertEqual("\t".join(map(str, c)),
str(r))
def testWrite(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
for y in range(len(r)):
r[y] = "test_%05i" % y
c[y] = "test_%05i" % y
self.assertEqual([x for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
# check second assignment
for y in range(len(r)):
r[y] = "test_%05i" % y
self.assertEqual([x for x in c], list(r))
self.assertEqual("\t".join(c), str(r))
def testUnset(self):
for x, r in enumerate(self.tabix.fetch(parser=pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
c = list(r)
e = list(r)
for y in range(len(r)):
r[y] = None
c[y] = None
e[y] = ""
self.assertEqual(c, list(r))
self.assertEqual("\t".join(e), str(r))
def testIteratorCompressed(self):
'''test iteration from compressed file.'''
with gzip.open(self.filename) as infile:
for x, r in enumerate(pysam.tabix_iterator(
infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
def testIteratorUncompressed(self):
'''test iteration from uncompressed file.'''
tmpfilename = 'tmp_testIteratorUncompressed'
with gzip.open(self.filename, "rb") as infile, \
open(tmpfilename, "wb") as outfile:
outfile.write(infile.read())
with open(tmpfilename) as infile:
for x, r in enumerate(pysam.tabix_iterator(
infile, pysam.asTuple())):
self.assertEqual(self.compare[x], list(r))
self.assertEqual(len(self.compare[x]), len(r))
# test indexing
for c in range(0, len(r)):
self.assertEqual(self.compare[x][c], r[c])
# test slicing access
for c in range(0, len(r) - 1):
for cc in range(c + 1, len(r)):
self.assertEqual(self.compare[x][c:cc],
r[c:cc])
os.unlink(tmpfilename)
def testCopy(self):
a = self.tabix.fetch(parser=pysam.asTuple()).next()
b = copy.copy(a)
self.assertEqual(a, b)
a = self.tabix.fetch(parser=pysam.asGTF()).next()
b = copy.copy(a)
self.assertEqual(a, b)
class TestGTF(TestParser):
parser = pysam.asGTF
def build_attribute_string(self, d):
"""build attribute string from dictionary d"""
s = "; ".join(["{} \"{}\"".format(x, y) for (x, y) in d.items()]) + ";"
# remove quotes around numeric values
s = re.sub(r'"(\d+)"', r'\1', s)
return s
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=self.parser())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(list(c), list(r))
self.assertEqual(c, str(r).split("\t"))
self.assertTrue(r.gene_id.startswith("ENSG"))
if r.feature != 'gene':
self.assertTrue(r.transcript_id.startswith("ENST"))
self.assertEqual(c[0], r.contig)
self.assertEqual("\t".join(map(str, c)),
str(r))
def test_setting_fields(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.contig = r.contig + "_test_contig"
r.source = r.source + "_test_source"
r.feature = r.feature + "_test_feature"
r.start += 10
r.end += 10
r.score = 20
r.strand = "+"
r.frame = 0
r.attributes = 'gene_id "0001";'
r.transcript_id = "0002"
sr = str(r)
self.assertTrue("_test_contig" in sr)
self.assertTrue("_test_source" in sr)
self.assertTrue("_test_feature" in sr)
self.assertTrue("gene_id \"0001\"" in sr)
self.assertTrue("transcript_id \"0002\"" in sr)
def test_setAttribute_makes_changes(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.setAttribute("transcript_id", "abcd")
sr = str(r)
self.assertEqual(r.transcript_id, "abcd")
self.assertTrue("transcript_id \"abcd\"" in sr)
def test_added_attribute_is_output(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.new_int_attribute = 12
self.assertTrue("new_int_attribute 12" in str(r).split("\t")[8])
r.new_float_attribute = 12.0
self.assertTrue("new_float_attribute 12.0" in str(r).split("\t")[8])
r.new_text_attribute = "abc"
self.assertTrue("new_text_attribute \"abc\"" in str(r).split("\t")[8])
def test_setting_start_is_one_based(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.start = 1800
self.assertEqual(r.start, 1800)
self.assertEqual(str(r).split("\t")[3], "1801")
def test_setting_end_is_one_based(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.end = 2100
self.assertEqual(r.end, 2100)
self.assertEqual(str(r).split("\t")[4], "2100")
def test_setting_frame_to_none_produces_dot(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.frame = None
self.assertEqual(str(r).split("\t")[7], ".")
r.frame = 2
self.assertEqual(str(r).split("\t")[7], "2")
r = self.tabix.fetch(parser=self.parser()).next()
r.frame = "."
self.assertEqual(r.frame, None)
self.assertEqual(str(r).split("\t")[7], ".")
def test_setting_source_to_none_produces_dot(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.source = None
self.assertEqual(str(r).split("\t")[1], ".")
r.source = "source"
self.assertEqual(str(r).split("\t")[1], "source")
r = self.tabix.fetch(parser=self.parser()).next()
r.source = "."
self.assertEqual(r.source, None)
self.assertEqual(str(r).split("\t")[1], ".")
def test_setting_feature_to_none_produces_dot(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.feature = None
self.assertEqual(str(r).split("\t")[2], ".")
r.feature = "feature"
self.assertEqual(str(r).split("\t")[2], "feature")
r = self.tabix.fetch(parser=self.parser()).next()
r.feature = "."
self.assertEqual(r.feature, None)
self.assertEqual(str(r).split("\t")[2], ".")
def test_setting_strand_to_none_produces_dot(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.strand = None
self.assertEqual(str(r).split("\t")[6], ".")
r.strand = "-"
self.assertEqual(str(r).split("\t")[6], "-")
r = self.tabix.fetch(parser=self.parser()).next()
r.strand = "."
self.assertEqual(r.strand, None)
self.assertEqual(str(r).split("\t")[6], ".")
def test_setting_score_to_none_produces_dot(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.score = None
self.assertEqual(str(r).split("\t")[5], ".")
r.score = 12.0
self.assertEqual(str(r).split("\t")[5], "12.0")
r.score = -12.0
self.assertEqual(str(r).split("\t")[5], "-12.0")
r = self.tabix.fetch(parser=self.parser()).next()
r.score = "."
self.assertEqual(r.score, None)
self.assertEqual(str(r).split("\t")[5], ".")
r.score = 12
self.assertEqual(str(r).split("\t")[5], "12")
r.score = -12
self.assertEqual(str(r).split("\t")[5], "-12")
def test_asdict_contains_attributes(self):
r = self.tabix.fetch(parser=self.parser()).next()
d = r.to_dict()
c = self.compare[0]
s = self.build_attribute_string(d)
self.assertEqual(s, c[8])
def test_asdict_can_be_modified(self):
r = self.tabix.fetch(parser=self.parser()).next()
d = r.to_dict()
d["gene_id"] = "new_gene_id"
self.assertTrue("gene_id \"new_gene_id\"", str(r))
class TestGFF3(TestGTF):
parser = pysam.asGFF3
filename = os.path.join(TABIX_DATADIR, "example.gff3.gz")
def build_attribute_string(self, d):
"""build attribute string from dictionary d"""
s = ";".join(["{}={}".format(x, y) for (x, y) in d.items()]) + ";"
return s
def testRead(self):
for x, r in enumerate(self.tabix.fetch(parser=self.parser())):
c = self.compare[x]
self.assertEqual(len(c), len(r))
self.assertEqual(list(c), list(r))
self.assertEqual(c, str(r).split("\t"))
self.assertEqual(c[0], r.contig)
self.assertEqual("\t".join(map(str, c)),
str(r))
self.assertTrue(r.ID.startswith("MI00"))
def test_setting_fields(self):
for r in self.tabix.fetch(parser=self.parser()):
r.contig = r.contig + "_test_contig"
r.source = "test_source"
r.feature = "test_feature"
r.start += 10
r.end += 10
r.score = 20
r.strand = "+"
r.frame = 0
r.ID = "test"
sr = str(r)
self.assertTrue("test_contig" in sr)
self.assertTrue("test_source" in sr)
self.assertTrue("test_feature" in sr)
self.assertTrue("ID=test" in sr)
def test_setAttribute_makes_changes(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.setAttribute("transcript_id", "abcd")
sr = str(r)
self.assertEqual(r.transcript_id, "abcd")
self.assertTrue("transcript_id=abcd" in sr)
def test_added_attribute_is_output(self):
r = self.tabix.fetch(parser=self.parser()).next()
r.new_int_attribute = 12
self.assertTrue("new_int_attribute=12" in str(r).split("\t")[8])
r.new_float_attribute = 12.0
self.assertTrue("new_float_attribute=12.0" in str(r).split("\t")[8])
r.new_text_attribute = "abc"
self.assertTrue("new_text_attribute=abc" in str(r).split("\t")[8])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jmarsil/pygempick",
"score": 3
} |
#### File: pygempick/pygempick/modeling.py
```python
import numpy as np
import cv2
import random
import pandas as pd
import scipy.optimize as opt
import matplotlib.pyplot as plt
#import pygempick module(s)
import pygempick.core as core
import pygempick.spatialstats as spa
def draw(n, test_number, noise, images):
'''
Draws test micrograph sets that will be used in subsequent
efficiency or separation tests.
1. test_number 1 draws only circles; 2 draws both circles and ellipses.
2. noise: if == 'yes', randomly distributed Gaussian noise is drawn
according to mu1, sig1.
3. images is the number of images in the set - used with n (the number of
particles detected in the actual set) to calculate the particle density of
the model set.
'''
row = 776 #image height
col = 1018 #image width
radrange = np.arange(4,8,1)
mu = n/images #mean particle number across your images
sigma = np.sqrt(mu) #standard deviation of the mean from your data
##creates a new normal distribution based on your data (particles,images)
pick = np.random.normal(mu,sigma)
#height = np.arange(26,750) ##array of possible particle heights
#width = np.arange(26,992) ##array of possible particle widths
height = 750
width = 990
count = 0
circles = 0
elipses = 0
#mu1 = .05
#sig1 = .02
image = 255*np.ones((row,col), np.float32)
##convert to BGR
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if noise == 'yes':
mu1 = float(input('Input mean of Gaussian Distributed Noise'))
sig1 = float(input('Input std of Gaussian Distributed Noise'))
##adding random gaussian distributed noise to image...
for q in range(row):
for w in range(col):
image[q][w] = np.float32(np.int(255*np.random.normal(mu1,sig1)))
##change this value for high variability in background conditions..
if test_number == 1:
for j in range(np.int(pick)):
count+=1
##picks a random particle radius between 4 and 8 pixels
r = random.choice(radrange)
##chooses a random center position for the circle
#h = random.choice(height)
#w = random.choice(width)
w = np.random.uniform(20,width)
h = np.random.uniform(20,height)
#w = np.int(col*np.random.rand()) #first method used to choose random width/height...
##ensure that no particles are drawn on the edges of the image
##figure out how to avoid borders...
##draw a black circle
cv2.circle(image, (int(h), int(w)), np.int(r), (0,0,0), -1)
image = (image).astype('uint8')
print('Complete')
return image, count
elif test_number == 2:
q = np.int(pick)
count = 0
while count <= q:
##picks a random particle radius between 4 and 8 pixels
axis = random.choice(radrange)
#N = width * height / 4
##chooses a random center position for the circle
w = np.int(np.random.uniform(20,width))
h = np.int(np.random.uniform(20,height))
##bernouli trial to draw either circle or elippse...
flip = np.random.rand()
if flip < 0.5:
#draw a circle
cv2.circle(image,(h,w), np.int(axis), (0,0,0), -1)
circles +=1
else:
#draw an elippse...
elipses += 1
cv2.ellipse(image,(h,w),(int(axis)*2,int(axis)),0,0,360,(0,0,0),-1)
count += 1
count = circles + elipses
image = (image).astype('uint8')
return image, int(circles), int(elipses)
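# Illustrative usage sketch - the numbers and file name are placeholder
# assumptions, not values from the publication. Given n detected particles
# across a real image set, a model micrograph with matching particle density
# could be drawn and saved like this:
#   model_img, n_circles, n_ellipses = draw(n=12000, test_number=2,
#                                           noise='no', images=150)
#   cv2.imwrite('model_micrograph.png', model_img)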
def imgclass(inv_img):
'''
Uses a compressed grayscale image from cvt_color(RGB2GRAY) and returns
the intensity histogram and related bins position w/ im_class.
Can optimize this function to a greater extent.
Receives the following input:
gray_img = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)
'''
##can edit to make a histogram of the pixel intensities of the image...
hist, bins = np.histogram(inv_img.flatten(),256,[0,256])
#bincenters = 0.5*(bins[1:]+bins[:-1])
##apending max histogram intensities into a list
histx = np.argmax(hist)
if histx < 110:
im_class = 1
elif 110 <= histx < 120:
im_class = 2
elif 120 <= histx < 125:
im_class = 3
elif 125 <= histx < 130:
im_class= 4
elif 130 <= histx < 135:
im_class= 5
elif 135 <= histx < 140:
im_class= 6
elif 140 <= histx < 145:
im_class= 7
elif 145 <= histx < 150:
im_class= 8
elif 150 <= histx < 160:
im_class= 9
elif histx >= 160:
im_class= 10
return im_class, histx
def septest(p,image):
'''
Let p be a range of integers [1, x]; for the publication x is set to 31.
Let image be a grayscale image produced after original image compression and
conversion to grayscale using OpenCV:
image = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)
'''
detected_bin = np.zeros(len(p))
detected_lap = np.zeros(len(p))
detected_dog = np.zeros(len(p))
detected_log = np.zeros(len(p))
#the background conditions of various image sets will vary -
#go back and plot
for i in range(len(p)):
#same scaling factor as used by SIFT on the simple scale
output_bin, _ = core.bin_filt(p[i], image)
output_lap = core.hclap_filt(p[i],image, 'no')
output_dog = core.dog_filt(p[i],image)
output_log = core.hlog_filt(p[i], image, 'no')
keypoints_bin = core.pick(output_bin, 31, .83, .61 , .61, 0)
keypoints_lap = core.pick(output_lap, 31, .83, .61 , .61, 0)
keypoints_dog = core.pick(output_dog, 31, .83, .61 , .61, 0)
keypoints_log = core.pick(output_log, 31, .83, .61 , .61, 0)
if len(keypoints_lap) > 0:
detected_lap[i] = len(keypoints_lap)
else:
detected_lap[i] = 0
if len(keypoints_dog) > 0:
detected_dog[i] = len(keypoints_dog)
else:
detected_dog[i] = 0
if len(keypoints_bin)>0:
detected_bin[i] = len(keypoints_bin)
else:
detected_bin[i] = 0
if len(keypoints_log)>0:
detected_log[i] = len(keypoints_log)
else:
detected_log[i] = 0
#returns an array of the number of particles detected per filtering method...
#took out detected_dog for a more in depth test...
return detected_bin, detected_lap, detected_dog, detected_log
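# Illustrative usage sketch - the file name is a placeholder. septest is meant
# to be swept over a range of filter scaling parameters on one grayscale
# micrograph, then the detection counts plotted per filter:
#   gray = cv2.cvtColor(cv2.imread('micrograph.jpg'), cv2.COLOR_RGB2GRAY)
#   p = np.arange(1, 31, 1)
#   det_bin, det_lap, det_dog, det_log = septest(p, gray)
#   plt.plot(p, det_lap, label='HCLAP')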
def septest2(p, image, hlogkey):
'''
Let p be a range of integers [1, x]; for the publication x is set to 31.
Let image be a grayscale image produced after original image compression and
conversion to grayscale using OpenCV.
hlogkey holds the keypoints of the image filtered with the HLOG filter - this
ensures faster particle detection since we aren't running the same filtering
step more than once!
'''
count = np.zeros(len(p))
duplicates = np.zeros(len(p))
keypoints2 = hlogkey
for i in range(len(p)):
output1 = core.hclap_filt(p[i], image, 'no')
keypoints1 = core.pick(output1, 31, .83, .5, .5, 0)
keypoints1, dup = core.key_filt(keypoints1, keypoints2)
if len(keypoints1) != 0 and len(keypoints2) ==0:
count[i] = len(keypoints1)
elif len(keypoints1) != 0 and len(keypoints2) !=0:
count[i] = len(keypoints1) + len(keypoints2)
elif len(keypoints1) == 0 and len(keypoints2) !=0:
count[i] = len(keypoints2)
else:
count[i] = 0
duplicates[i] = dup
return count, duplicates
def fitpcfs(data):
'''
data1 = pd.read_csv('/home/joseph/Documents/PHY479/pcf-dr5-error.csv', header=None, skiprows=1)
Function initially created to plot graphs from V30M and CD1 positive controls;
please add modifications and change to suit your needs.
**Note: pcf-dr5-error.csv is a file outputted from keypoints2pcf()
look to that function to see how that output is formatted.
Output : built to produce one graph, with fitted curve for positive control(s).
Equation fitted to probability distribution for Complete Spatial Randomness of
the distribution of IGEM particles across EM micrographs.
'''
data = pd.DataFrame(data)
data = data.fillna(0)
#determine guess filtering parameters
pcfp1 = np.array([100.,1.,1.])
pcfp2 = np.array([10.,1., 1.])
x = data[2].values
y = data[0].values
dy = data[1].values
x1 = data[5].values
y1 = data[3].values
dy1 = data[4].values
popt1, pcov1 = opt.curve_fit(spa.pcf , x, y, p0 = pcfp1)
popt2, pcov2 = opt.curve_fit(spa.pcf , x1, y1, p0 = pcfp2)
popt1 = np.around(popt1, decimals=2)
popt2 = np.around(popt2, decimals=2)
#The probability of locating the Nth neighbor of any given point,
#at some radial distance r, is:
plt.figure()
plt.title('Probability of Gold Particle Colocalization on TTR micrographs')
#CSR of CD1 micrograph set
plt.plot(x,y,'xr') #keypoints of CD1 micrographs
plt.plot(np.arange(0,110,1), spa.pcf(np.arange(0,110,1), popt1[0], popt1[1], popt1[2]),
'r-', label='CD1 CSR, N = {} +/- {}, L = {} +/- {}'.format(popt1[0],
np.around(np.sqrt(pcov1[0,0]), decimals=3),
popt1[1], np.around(np.sqrt(pcov1[1,1]), decimals=3)))
plt.errorbar(x, y, yerr=dy, fmt='xr')
plt.plot(x1,y1, 'og') ##keypoints of V30M micrographs
plt.plot(np.arange(0,110,1), spa.pcf(np.arange(0,110,1), popt2[0], popt2[1], popt2[2]),
'g-', label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(popt2[0],
np.around(np.sqrt(pcov2[0,0]), decimals=3),
popt2[1], np.around(np.sqrt(pcov2[1,1]), decimals=3)))
plt.errorbar(x1, y1, yerr=dy1, fmt='og')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Radius (r)')
#Probability Nth point at distance r
plt.ylabel('P(r)')
def fitpcf(data, N, p0, p1):
data = pd.DataFrame(data)
data = data.fillna(0)
#determine guess filtering parameters
pcfp1 = np.array([N,p0,p1])
x = data[2].values
y = data[0].values
dy = data[1].values
popt1, pcov1 = opt.curve_fit(spa.pcf , x, y, p0 = pcfp1)
popt1 = np.around(popt1, decimals=2)
plt.figure()
plt.title('Probability of Gold Particle Colocalization on TTR micrographs')
#CSR of CD1 micrograph set
plt.plot(x,y,'xr') #keypoints of CD1 micrographs
plt.plot(np.arange(0,210,1), spa.pcf(np.arange(0,210,1), popt1[0], popt1[1], popt1[2]),
'g-', label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(popt1[0],
np.around(np.sqrt(pcov1[0,0]), decimals=3),
popt1[1], np.around(np.sqrt(pcov1[1,1]), decimals=3)))
plt.errorbar(x, y, yerr=dy, fmt='og')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Radius (r)')
#Probability Nth point at distance r
plt.ylabel('P(r)')
plt.show()
return popt1, np.around(np.sqrt(pcov1), decimals=3)
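# Illustrative usage sketch - assumes a CSV produced by keypoints2pcf() with
# the column layout described in fitpcfs above; the path and guess parameters
# are placeholders:
#   data = pd.read_csv('pcf-dr5-error.csv', header=None, skiprows=1)
#   popt, perr = fitpcf(data, N=100., p0=1., p1=1.)
#   print('fitted N, L:', popt[0], popt[1])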
``` |
{
"source": "jmartasek/hail",
"score": 2
} |
#### File: azure/driver/driver.py
```python
import asyncio
import logging
import os
from hailtop import aiotools
from hailtop.utils import periodically_call
from hailtop.aiocloud import aioazure
from gear import Database
from gear.cloud_config import get_azure_config
from ....driver.driver import CloudDriver
from ....driver.instance_collection import Pool, JobPrivateInstanceManager, InstanceCollectionManager
from ....inst_coll_config import InstanceCollectionConfigs
from .resource_manager import AzureResourceManager
from .regions import RegionMonitor
log = logging.getLogger('driver')
class AzureDriver(CloudDriver):
@staticmethod
async def create(app,
db: Database, # BORROWED
machine_name_prefix: str,
namespace: str,
inst_coll_configs: InstanceCollectionConfigs,
credentials_file: str,
task_manager: aiotools.BackgroundTaskManager, # BORROWED
) -> 'AzureDriver':
azure_config = get_azure_config()
subscription_id = azure_config.subscription_id
resource_group = azure_config.resource_group
region = azure_config.region
with open(os.environ['HAIL_SSH_PUBLIC_KEY']) as f:
ssh_public_key = f.read()
arm_client = aioazure.AzureResourceManagerClient(subscription_id, resource_group, credentials_file=credentials_file)
compute_client = aioazure.AzureComputeClient(subscription_id, resource_group, credentials_file=credentials_file)
resources_client = aioazure.AzureResourcesClient(subscription_id, credentials_file=credentials_file)
network_client = aioazure.AzureNetworkClient(subscription_id, resource_group, credentials_file=credentials_file)
region_monitor = await RegionMonitor.create(region)
inst_coll_manager = InstanceCollectionManager(db, machine_name_prefix, region_monitor)
resource_manager = AzureResourceManager(subscription_id, resource_group, ssh_public_key, arm_client, compute_client)
create_pools_coros = [
Pool.create(app,
db,
inst_coll_manager,
resource_manager,
machine_name_prefix,
config,
app['async_worker_pool'],
task_manager)
for pool_name, config in inst_coll_configs.name_pool_config.items()
]
jpim, *_ = await asyncio.gather(
JobPrivateInstanceManager.create(
app, db, inst_coll_manager, resource_manager, machine_name_prefix, inst_coll_configs.jpim_config, task_manager),
*create_pools_coros)
driver = AzureDriver(db,
machine_name_prefix,
arm_client,
compute_client,
resources_client,
network_client,
subscription_id,
resource_group,
namespace,
region_monitor,
inst_coll_manager,
jpim)
task_manager.ensure_future(periodically_call(60, driver.delete_orphaned_nics))
task_manager.ensure_future(periodically_call(60, driver.delete_orphaned_public_ips))
return driver
def __init__(self,
db: Database,
machine_name_prefix: str,
arm_client: aioazure.AzureResourceManagerClient,
compute_client: aioazure.AzureComputeClient,
resources_client: aioazure.AzureResourcesClient,
network_client: aioazure.AzureNetworkClient,
subscription_id: str,
resource_group: str,
namespace: str,
region_monitor: RegionMonitor,
inst_coll_manager: InstanceCollectionManager,
job_private_inst_manager: JobPrivateInstanceManager):
self.db = db
self.machine_name_prefix = machine_name_prefix
self.arm_client = arm_client
self.compute_client = compute_client
self.resources_client = resources_client
self.network_client = network_client
self.subscription_id = subscription_id
self.resource_group = resource_group
self.namespace = namespace
self.region_monitor = region_monitor
self.inst_coll_manager = inst_coll_manager
self.job_private_inst_manager = job_private_inst_manager
async def shutdown(self) -> None:
try:
await self.arm_client.close()
finally:
try:
await self.compute_client.close()
finally:
try:
await self.resources_client.close()
finally:
await self.network_client.close()
def _resource_is_orphaned(self, resource_name: str) -> bool:
instance_name = resource_name.rsplit('-', maxsplit=1)[0]
return self.inst_coll_manager.get_instance(instance_name) is None
async def delete_orphaned_nics(self) -> None:
log.info('deleting orphaned nics')
async for nic_name in self.resources_client.list_nic_names(self.machine_name_prefix):
if self._resource_is_orphaned(nic_name):
try:
await self.network_client.delete_nic(nic_name, ignore_not_found=True)
except asyncio.CancelledError:
raise
except Exception:
log.exception(f'while deleting orphaned nic {nic_name}')
async def delete_orphaned_public_ips(self) -> None:
log.info('deleting orphaned public ips')
async for public_ip_name in self.resources_client.list_public_ip_names(self.machine_name_prefix):
if self._resource_is_orphaned(public_ip_name):
try:
await self.network_client.delete_public_ip(public_ip_name, ignore_not_found=True)
except asyncio.CancelledError:
raise
except Exception:
log.exception(f'while deleting orphaned public ip {public_ip_name}')
```
#### File: azure/worker/utils.py
```python
import os
from typing import Dict
import aiohttp
from hailtop.utils import request_retry_transient_errors, time_msecs
acr_refresh_token = None
expiration_time = None
async def get_aad_access_token(session: aiohttp.ClientSession) -> str:
# https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#get-a-token-using-http
params = {
'api-version': '2018-02-01',
'resource': 'https://management.azure.com/'
}
async with await request_retry_transient_errors(
session,
'GET',
'http://169.254.169.254/metadata/identity/oauth2/token',
headers={'Metadata': 'true'},
params=params,
timeout=aiohttp.ClientTimeout(total=60)
) as resp:
access_token = (await resp.json())['access_token']
return access_token
async def get_acr_refresh_token(session: aiohttp.ClientSession, acr_url: str, aad_access_token: str) -> str:
# https://github.com/Azure/acr/blob/main/docs/AAD-OAuth.md#calling-post-oauth2exchange-to-get-an-acr-refresh-token
data = {
'grant_type': 'access_token',
'service': acr_url,
'access_token': aad_access_token
}
async with await request_retry_transient_errors(
session,
'POST',
f'https://{acr_url}/oauth2/exchange',
headers={'Content-Type': 'application/x-www-form-urlencoded'},
data=data,
timeout=aiohttp.ClientTimeout(total=60)
) as resp:
refresh_token = (await resp.json())['refresh_token']
return refresh_token
async def azure_worker_access_token(session: aiohttp.ClientSession) -> Dict[str, str]:
global acr_refresh_token, expiration_time
if acr_refresh_token is None or time_msecs() >= expiration_time:
acr_url = os.environ['DOCKER_PREFIX']
assert acr_url.endswith('azurecr.io'), acr_url
aad_access_token = await get_aad_access_token(session)
acr_refresh_token = await get_acr_refresh_token(session, acr_url, aad_access_token)
expiration_time = time_msecs() + 60 * 60 * 1000 # token expires in 3 hours so we refresh after 1 hour
return {'username': '0<PASSWORD>', 'password': <PASSWORD>}
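# Illustrative usage sketch (not part of the worker itself): the dict returned
# above is meant to be used as Docker-style registry credentials, e.g.
#   async def example():
#       async with aiohttp.ClientSession() as session:
#           creds = await azure_worker_access_token(session)
#           # pass creds['username'] / creds['password'] to the ACR login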
```
#### File: driver/instance_collection/pool.py
```python
from typing import Optional
import sortedcontainers
import logging
import asyncio
import random
import collections
from gear import Database
from hailtop import aiotools
from hailtop.utils import (
secret_alnum_string,
retry_long_running,
run_if_changed,
time_msecs,
WaitableSharedPool,
AsyncWorkerPool,
Notice,
periodically_call,
)
from ...batch_configuration import STANDING_WORKER_MAX_IDLE_TIME_MSECS
from ...inst_coll_config import PoolConfig
from ...utils import Box, ExceededSharesCounter
from ..instance import Instance
from ..resource_manager import CloudResourceManager
from ..job import schedule_job
from .base import InstanceCollectionManager, InstanceCollection
log = logging.getLogger('pool')
class Pool(InstanceCollection):
@staticmethod
async def create(app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager
) -> 'Pool':
pool = Pool(
app, db, inst_coll_manager, resource_manager, machine_name_prefix, config, async_worker_pool, task_manager)
log.info(f'initializing {pool}')
async for record in db.select_and_fetchall(
'SELECT * FROM instances WHERE removed = 0 AND inst_coll = %s;', (pool.name,)
):
pool.add_instance(Instance.from_record(app, pool, record))
return pool
def __init__(self,
app,
db: Database, # BORROWED
inst_coll_manager: InstanceCollectionManager,
resource_manager: CloudResourceManager,
machine_name_prefix: str,
config: PoolConfig,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
super().__init__(db,
inst_coll_manager,
resource_manager,
config.cloud,
config.name,
machine_name_prefix,
is_pool=True,
max_instances=config.max_instances,
max_live_instances=config.max_live_instances,
task_manager=task_manager)
self.app = app
self.inst_coll_manager = inst_coll_manager
global_scheduler_state_changed: Notice = self.app['scheduler_state_changed']
self.scheduler_state_changed = global_scheduler_state_changed.subscribe()
self.scheduler = PoolScheduler(self.app, self, async_worker_pool, task_manager)
self.healthy_instances_by_free_cores = sortedcontainers.SortedSet(key=lambda instance: instance.free_cores_mcpu)
self.worker_type = config.worker_type
self.worker_cores = config.worker_cores
self.worker_local_ssd_data_disk = config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = config.enable_standing_worker
self.standing_worker_cores = config.standing_worker_cores
self.boot_disk_size_gb = config.boot_disk_size_gb
self.data_disk_size_gb = config.data_disk_size_gb
self.data_disk_size_standing_gb = config.data_disk_size_standing_gb
task_manager.ensure_future(self.control_loop())
@property
def local_ssd_data_disk(self) -> bool:
return self.worker_local_ssd_data_disk
def _default_location(self) -> str:
return self.inst_coll_manager.location_monitor.default_location()
def config(self):
return {
'name': self.name,
'worker_type': self.worker_type,
'worker_cores': self.worker_cores,
'boot_disk_size_gb': self.boot_disk_size_gb,
'worker_local_ssd_data_disk': self.worker_local_ssd_data_disk,
'worker_external_ssd_data_disk_size_gb': self.worker_external_ssd_data_disk_size_gb,
'enable_standing_worker': self.enable_standing_worker,
'standing_worker_cores': self.standing_worker_cores,
'max_instances': self.max_instances,
'max_live_instances': self.max_live_instances,
}
def configure(self, pool_config: PoolConfig):
assert self.name == pool_config.name
assert self.cloud == pool_config.cloud
assert self.worker_type == pool_config.worker_type
self.worker_cores = pool_config.worker_cores
self.worker_local_ssd_data_disk = pool_config.worker_local_ssd_data_disk
self.worker_external_ssd_data_disk_size_gb = pool_config.worker_external_ssd_data_disk_size_gb
self.enable_standing_worker = pool_config.enable_standing_worker
self.standing_worker_cores = pool_config.standing_worker_cores
self.boot_disk_size_gb = pool_config.boot_disk_size_gb
self.data_disk_size_gb = pool_config.data_disk_size_gb
self.data_disk_size_standing_gb = pool_config.data_disk_size_standing_gb
self.max_instances = pool_config.max_instances
self.max_live_instances = pool_config.max_live_instances
def adjust_for_remove_instance(self, instance):
super().adjust_for_remove_instance(instance)
if instance in self.healthy_instances_by_free_cores:
self.healthy_instances_by_free_cores.remove(instance)
def adjust_for_add_instance(self, instance):
super().adjust_for_add_instance(instance)
if instance.state == 'active' and instance.failed_request_count <= 1:
self.healthy_instances_by_free_cores.add(instance)
def get_instance(self, user, cores_mcpu):
i = self.healthy_instances_by_free_cores.bisect_key_left(cores_mcpu)
while i < len(self.healthy_instances_by_free_cores):
instance = self.healthy_instances_by_free_cores[i]
assert cores_mcpu <= instance.free_cores_mcpu
if user != 'ci' or (user == 'ci' and instance.location == self._default_location()):
return instance
i += 1
histogram = collections.defaultdict(int)
for instance in self.healthy_instances_by_free_cores:
histogram[instance.free_cores_mcpu] += 1
log.info(f'schedule {self}: no viable instances for {cores_mcpu}: {histogram}')
return None
async def create_instance(self,
cores: int,
data_disk_size_gb: int,
max_idle_time_msecs: Optional[int] = None,
location: Optional[str] = None,
):
machine_type = self.resource_manager.machine_type(cores, self.worker_type, self.worker_local_ssd_data_disk)
_, _ = await self._create_instance(
app=self.app,
cores=cores,
machine_type=machine_type,
job_private=False,
location=location,
preemptible=True,
max_idle_time_msecs=max_idle_time_msecs,
local_ssd_data_disk=self.worker_local_ssd_data_disk,
data_disk_size_gb=data_disk_size_gb,
boot_disk_size_gb=self.boot_disk_size_gb
)
async def create_instances_from_ready_cores(self, ready_cores_mcpu, location=None):
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if location is None:
live_free_cores_mcpu = self.live_free_cores_mcpu
else:
live_free_cores_mcpu = self.live_free_cores_mcpu_by_location[location]
instances_needed = (ready_cores_mcpu - live_free_cores_mcpu + (self.worker_cores * 1000) - 1) // (
self.worker_cores * 1000
)
instances_needed = min(
instances_needed,
self.max_live_instances - n_live_instances,
self.max_instances - self.n_instances,
# 20 queries/s; our GCE long-run quota
300,
# n * 16 cores / 15s = excess_scheduling_rate/s = 10/s => n ~= 10
10,
)
if instances_needed > 0:
log.info(f'creating {instances_needed} new instances')
# parallelism will be bounded by thread pool
await asyncio.gather(*[
self.create_instance(
cores=self.worker_cores,
data_disk_size_gb=self.data_disk_size_gb,
location=location
)
for _ in range(instances_needed)])
async def create_instances(self):
if self.app['frozen']:
log.info(f'not creating instances for {self}; batch is frozen')
return
ready_cores_mcpu_per_user = self.db.select_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user;
''',
(self.name,),
)
if ready_cores_mcpu_per_user is None:
ready_cores_mcpu_per_user = {}
else:
ready_cores_mcpu_per_user = {r['user']: r['ready_cores_mcpu'] async for r in ready_cores_mcpu_per_user}
ready_cores_mcpu = sum(ready_cores_mcpu_per_user.values())
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.healthy_instances_by_free_cores])
free_cores = free_cores_mcpu / 1000
log.info(
f'{self} n_instances {self.n_instances} {self.n_instances_by_state}'
f' free_cores {free_cores} live_free_cores {self.live_free_cores_mcpu / 1000}'
f' ready_cores {ready_cores_mcpu / 1000}'
)
if ready_cores_mcpu > 0 and free_cores < 500:
await self.create_instances_from_ready_cores(ready_cores_mcpu)
default_location = self._default_location()
ci_ready_cores_mcpu = ready_cores_mcpu_per_user.get('ci', 0)
if ci_ready_cores_mcpu > 0 and self.live_free_cores_mcpu_by_location[default_location] == 0:
await self.create_instances_from_ready_cores(ci_ready_cores_mcpu, location=default_location)
n_live_instances = self.n_instances_by_state['pending'] + self.n_instances_by_state['active']
if self.enable_standing_worker and n_live_instances == 0 and self.max_instances > 0:
await self.create_instance(
cores=self.standing_worker_cores,
data_disk_size_gb=self.data_disk_size_standing_gb,
max_idle_time_msecs=STANDING_WORKER_MAX_IDLE_TIME_MSECS
)
async def control_loop(self):
await periodically_call(15, self.create_instances)
def __str__(self):
return f'pool {self.name}'
class PoolScheduler:
def __init__(self,
app,
pool: Pool,
async_worker_pool: AsyncWorkerPool, # BORROWED
task_manager: aiotools.BackgroundTaskManager, # BORROWED
):
self.app = app
self.scheduler_state_changed = pool.scheduler_state_changed
self.db: Database = app['db']
self.pool = pool
self.async_worker_pool = async_worker_pool
self.exceeded_shares_counter = ExceededSharesCounter()
task_manager.ensure_future(
retry_long_running('schedule_loop', run_if_changed, self.scheduler_state_changed, self.schedule_loop_body)
)
async def compute_fair_share(self):
free_cores_mcpu = sum([worker.free_cores_mcpu for worker in self.pool.healthy_instances_by_free_cores])
user_running_cores_mcpu = {}
user_total_cores_mcpu = {}
result = {}
pending_users_by_running_cores = sortedcontainers.SortedSet(key=lambda user: user_running_cores_mcpu[user])
allocating_users_by_total_cores = sortedcontainers.SortedSet(key=lambda user: user_total_cores_mcpu[user])
records = self.db.execute_and_fetchall(
'''
SELECT user,
CAST(COALESCE(SUM(n_ready_jobs), 0) AS SIGNED) AS n_ready_jobs,
CAST(COALESCE(SUM(ready_cores_mcpu), 0) AS SIGNED) AS ready_cores_mcpu,
CAST(COALESCE(SUM(n_running_jobs), 0) AS SIGNED) AS n_running_jobs,
CAST(COALESCE(SUM(running_cores_mcpu), 0) AS SIGNED) AS running_cores_mcpu
FROM user_inst_coll_resources
WHERE inst_coll = %s
GROUP BY user
HAVING n_ready_jobs + n_running_jobs > 0;
''',
(self.pool.name,),
timer_description=f'in compute_fair_share for {self.pool.name}: aggregate user_inst_coll_resources',
)
async for record in records:
user = record['user']
user_running_cores_mcpu[user] = record['running_cores_mcpu']
user_total_cores_mcpu[user] = record['running_cores_mcpu'] + record['ready_cores_mcpu']
pending_users_by_running_cores.add(user)
record['allocated_cores_mcpu'] = 0
result[user] = record
def allocate_cores(user, mark):
result[user]['allocated_cores_mcpu'] = int(mark - user_running_cores_mcpu[user] + 0.5)
mark = 0
while free_cores_mcpu > 0 and (pending_users_by_running_cores or allocating_users_by_total_cores):
lowest_running = None
lowest_total = None
if pending_users_by_running_cores:
lowest_running_user = pending_users_by_running_cores[0]
lowest_running = user_running_cores_mcpu[lowest_running_user]
if lowest_running == mark:
pending_users_by_running_cores.remove(lowest_running_user)
allocating_users_by_total_cores.add(lowest_running_user)
continue
if allocating_users_by_total_cores:
lowest_total_user = allocating_users_by_total_cores[0]
lowest_total = user_total_cores_mcpu[lowest_total_user]
if lowest_total == mark:
allocating_users_by_total_cores.remove(lowest_total_user)
allocate_cores(lowest_total_user, mark)
continue
allocation = min([c for c in [lowest_running, lowest_total] if c is not None])
n_allocating_users = len(allocating_users_by_total_cores)
cores_to_allocate = n_allocating_users * (allocation - mark)
if cores_to_allocate > free_cores_mcpu:
mark += int(free_cores_mcpu / n_allocating_users + 0.5)
free_cores_mcpu = 0
break
mark = allocation
free_cores_mcpu -= cores_to_allocate
for user in allocating_users_by_total_cores:
allocate_cores(user, mark)
return result
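# Worked example of the water-filling above (numbers are illustrative only):
# with 4000 free mcpu, user A running 2000 mcpu with 4000 mcpu ready, and
# user B running 6000 mcpu with 2000 mcpu ready, the mark rises from A's
# 2000 toward B's 6000; only A is "allocating" while the free cores last, so
# A is topped up with all 4000 mcpu (allocated_cores_mcpu: A=4000, B=0).
# The user with the fewest running cores is always filled first.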
async def schedule_loop_body(self):
if self.app['frozen']:
log.info(f'not scheduling any jobs for {self.pool}; batch is frozen')
return True
log.info(f'schedule {self.pool}: starting')
start = time_msecs()
n_scheduled = 0
user_resources = await self.compute_fair_share()
total = sum(resources['allocated_cores_mcpu'] for resources in user_resources.values())
if not total:
log.info(f'schedule {self.pool}: no allocated cores')
should_wait = True
return should_wait
user_share = {
user: max(int(300 * resources['allocated_cores_mcpu'] / total + 0.5), 20)
for user, resources in user_resources.items()
}
async def user_runnable_jobs(user, remaining):
async for batch in self.db.select_and_fetchall(
'''
SELECT id, cancelled, userdata, user, format_version
FROM batches
WHERE user = %s AND `state` = 'running';
''',
(user,),
timer_description=f'in schedule {self.pool}: get {user} running batches',
):
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_inst_coll_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 1 AND inst_coll = %s
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (1)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
if not batch['cancelled']:
async for record in self.db.select_and_fetchall(
'''
SELECT job_id, spec, cores_mcpu
FROM jobs FORCE INDEX(jobs_batch_id_state_always_run_cancelled)
WHERE batch_id = %s AND state = 'Ready' AND always_run = 0 AND inst_coll = %s AND cancelled = 0
LIMIT %s;
''',
(batch['id'], self.pool.name, remaining.value),
timer_description=f'in schedule {self.pool}: get {user} batch {batch["id"]} runnable jobs (2)',
):
record['batch_id'] = batch['id']
record['userdata'] = batch['userdata']
record['user'] = batch['user']
record['format_version'] = batch['format_version']
yield record
waitable_pool = WaitableSharedPool(self.async_worker_pool)
should_wait = True
for user, resources in user_resources.items():
allocated_cores_mcpu = resources['allocated_cores_mcpu']
if allocated_cores_mcpu == 0:
continue
scheduled_cores_mcpu = 0
share = user_share[user]
log.info(f'schedule {self.pool}: user-share: {user}: {allocated_cores_mcpu} {share}')
remaining = Box(share)
async for record in user_runnable_jobs(user, remaining):
batch_id = record['batch_id']
job_id = record['job_id']
id = (batch_id, job_id)
attempt_id = secret_alnum_string(6)
record['attempt_id'] = attempt_id
if scheduled_cores_mcpu + record['cores_mcpu'] > allocated_cores_mcpu:
if random.random() > self.exceeded_shares_counter.rate():
self.exceeded_shares_counter.push(True)
self.scheduler_state_changed.set()
break
self.exceeded_shares_counter.push(False)
instance = self.pool.get_instance(user, record['cores_mcpu'])
if instance:
instance.adjust_free_cores_in_memory(-record['cores_mcpu'])
scheduled_cores_mcpu += record['cores_mcpu']
n_scheduled += 1
should_wait = False
async def schedule_with_error_handling(app, record, id, instance):
try:
await schedule_job(app, record, instance)
except Exception:
log.info(f'scheduling job {id} on {instance} for {self.pool}', exc_info=True)
await waitable_pool.call(schedule_with_error_handling, self.app, record, id, instance)
remaining.value -= 1
if remaining.value <= 0:
break
await waitable_pool.wait()
end = time_msecs()
log.info(f'schedule: scheduled {n_scheduled} jobs in {end - start}ms for {self.pool}')
return should_wait
```
#### File: aiotools/fs/fs.py
```python
from typing import Any, AsyncContextManager, Optional, Type, Set, AsyncIterator
from types import TracebackType
import abc
import asyncio
from hailtop.utils import retry_transient_errors, OnlineBoundedGather2
from .stream import ReadableStream, WritableStream
from .exceptions import FileAndDirectoryError
class FileStatus(abc.ABC):
@abc.abstractmethod
async def size(self) -> int:
pass
@abc.abstractmethod
async def __getitem__(self, key: str) -> Any:
pass
class FileListEntry(abc.ABC):
@abc.abstractmethod
def name(self) -> str:
pass
@abc.abstractmethod
async def url(self) -> str:
pass
@abc.abstractmethod
def url_maybe_trailing_slash(self) -> str:
pass
@abc.abstractmethod
async def is_file(self) -> bool:
pass
@abc.abstractmethod
async def is_dir(self) -> bool:
pass
@abc.abstractmethod
async def status(self) -> FileStatus:
pass
class MultiPartCreate(abc.ABC):
@abc.abstractmethod
async def create_part(self, number: int, start: int, size_hint: Optional[int] = None) -> AsyncContextManager[WritableStream]:
pass
@abc.abstractmethod
async def __aenter__(self) -> 'MultiPartCreate':
pass
@abc.abstractmethod
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
pass
class AsyncFS(abc.ABC):
FILE = 'file'
DIR = 'dir'
@property
@abc.abstractmethod
def schemes(self) -> Set[str]:
pass
@abc.abstractmethod
async def open(self, url: str) -> ReadableStream:
pass
@abc.abstractmethod
async def open_from(self, url: str, start: int) -> ReadableStream:
pass
@abc.abstractmethod
async def create(self, url: str, *, retry_writes: bool = True) -> AsyncContextManager[WritableStream]:
pass
@abc.abstractmethod
async def multi_part_create(
self,
sema: asyncio.Semaphore,
url: str,
num_parts: int) -> MultiPartCreate:
pass
@abc.abstractmethod
async def mkdir(self, url: str) -> None:
pass
@abc.abstractmethod
async def makedirs(self, url: str, exist_ok: bool = False) -> None:
pass
@abc.abstractmethod
async def statfile(self, url: str) -> FileStatus:
pass
@abc.abstractmethod
async def listfiles(self, url: str, recursive: bool = False) -> AsyncIterator[FileListEntry]:
pass
@abc.abstractmethod
async def staturl(self, url: str) -> str:
pass
async def _staturl_parallel_isfile_isdir(self, url: str) -> str:
assert not url.endswith('/')
async def with_exception(f, *args, **kwargs):
try:
return (await f(*args, **kwargs)), None
except Exception as e:
return None, e
[(is_file, isfile_exc), (is_dir, isdir_exc)] = await asyncio.gather(
with_exception(self.isfile, url), with_exception(self.isdir, url + '/'))
# raise exception deterministically
if isfile_exc:
raise isfile_exc
if isdir_exc:
raise isdir_exc
if is_file:
if is_dir:
raise FileAndDirectoryError(url)
return AsyncFS.FILE
if is_dir:
return AsyncFS.DIR
raise FileNotFoundError(url)
@abc.abstractmethod
async def isfile(self, url: str) -> bool:
pass
@abc.abstractmethod
async def isdir(self, url: str) -> bool:
pass
@abc.abstractmethod
async def remove(self, url: str) -> None:
pass
async def _remove_doesnt_exist_ok(self, url):
try:
await self.remove(url)
except FileNotFoundError:
pass
@abc.abstractmethod
async def rmtree(self, sema: Optional[asyncio.Semaphore], url: str) -> None:
pass
async def _rmtree_with_recursive_listfiles(self, sema: asyncio.Semaphore, url: str) -> None:
async with OnlineBoundedGather2(sema) as pool:
try:
it = await self.listfiles(url, recursive=True)
except FileNotFoundError:
return
async for entry in it:
await pool.call(self._remove_doesnt_exist_ok, await entry.url())
async def touch(self, url: str) -> None:
async with await self.create(url):
pass
async def read(self, url: str) -> bytes:
async with await self.open(url) as f:
return await f.read()
async def read_from(self, url: str, start: int) -> bytes:
async with await self.open_from(url, start) as f:
return await f.read()
async def read_range(self, url: str, start: int, end: int) -> bytes:
n = (end - start) + 1
async with await self.open_from(url, start) as f:
return await f.readexactly(n)
async def write(self, url: str, data: bytes) -> None:
async def _write() -> None:
async with await self.create(url, retry_writes=False) as f:
await f.write(data)
await retry_transient_errors(_write)
async def exists(self, url: str) -> bool:
try:
await self.statfile(url)
except FileNotFoundError:
return False
else:
return True
async def close(self) -> None:
pass
async def __aenter__(self) -> 'AsyncFS':
return self
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
await self.close()
def copy_part_size(self, url: str) -> int: # pylint: disable=unused-argument,no-self-use
'''Part size when copying using multi-part uploads. The part size of
the destination filesystem is used.'''
return 128 * 1024 * 1024
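# Illustrative usage sketch - `fs` stands for any concrete AsyncFS subclass and
# the URL is a placeholder. The convenience helpers compose the abstract
# primitives; note read_range reads (end - start) + 1 bytes, both ends inclusive:
#   async def example(fs: AsyncFS):
#       await fs.write('scheme://bucket/hello', b'hello world')
#       first5 = await fs.read_range('scheme://bucket/hello', 0, 4)  # b'hello'
#       assert await fs.exists('scheme://bucket/hello')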
``` |
{
"source": "j-martens/azure-cli-extensions",
"score": 2
} |
#### File: aks-preview/azext_aks_preview/commands.py
```python
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_managed_clusters
from ._client_factory import cf_agent_pools
from ._format import aks_show_table_format
from ._format import aks_agentpool_show_table_format
def load_command_table(self, _):
managed_clusters_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations.managed_clusters_operations#ManagedClustersOperations.{}',
client_factory=cf_managed_clusters
)
agent_pools_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations.agent_pools_operations#AgentPoolsOperations.{}',
client_factory=cf_managed_clusters
)
# AKS commands
with self.command_group('aks', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('create', 'aks_create', supports_no_wait=True)
g.custom_command('update', 'aks_update', supports_no_wait=True)
g.custom_command('scale', 'aks_scale', supports_no_wait=True)
g.custom_command('disable-addons', 'aks_disable_addons', supports_no_wait=True)
g.custom_command('enable-addons', 'aks_enable_addons', supports_no_wait=True)
g.custom_show_command('show', 'aks_show', table_transformer=aks_show_table_format)
g.custom_command('upgrade', 'aks_upgrade', supports_no_wait=True,
confirmation='Kubernetes may be unavailable during cluster upgrades.\n' +
'Are you sure you want to perform this operation?')
g.wait_command('wait')
# AKS agent pool commands
with self.command_group('aks nodepool', agent_pools_sdk, client_factory=cf_agent_pools) as g:
g.custom_command('list', 'aks_agentpool_list')
g.custom_show_command('show', 'aks_agentpool_show', table_transformer=aks_agentpool_show_table_format)
g.custom_command('add', 'aks_agentpool_add', supports_no_wait=True)
g.custom_command('scale', 'aks_agentpool_scale', supports_no_wait=True)
g.custom_command('upgrade', 'aks_agentpool_upgrade', supports_no_wait=True)
g.custom_command('delete', 'aks_agentpool_delete', supports_no_wait=True)
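# Illustration (hypothetical invocations, not part of the original file): each
# registration above maps a CLI call to a custom function, e.g.
# `az aks nodepool add ...` dispatches to aks_agentpool_add, and `--no-wait`
# is accepted because the command was registered with supports_no_wait=True.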
```
#### File: vendored_sdks/models/redirect_configuration_py3.py
```python
from .route_configuration_py3 import RouteConfiguration
class RedirectConfiguration(RouteConfiguration):
"""Describes Redirect Route.
All required parameters must be populated in order to send to Azure.
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param redirect_type: The redirect type the rule will use when redirecting
traffic. Possible values include: 'Moved', 'Found', 'TemporaryRedirect',
'PermanentRedirect'
:type redirect_type: str or
~azure.mgmt.frontdoor.models.FrontDoorRedirectType
:param redirect_protocol: The protocol of the destination to where the
traffic is redirected. Possible values include: 'HttpOnly', 'HttpsOnly',
'MatchRequest'
:type redirect_protocol: str or
~azure.mgmt.frontdoor.models.FrontDoorRedirectProtocol
:param custom_host: Host to redirect. Leave empty to use the incoming
host as the destination host.
:type custom_host: str
:param custom_path: The full path to redirect. Path cannot be empty and
must start with /. Leave empty to use the incoming path as destination
path.
:type custom_path: str
:param custom_fragment: Fragment to add to the redirect URL. Fragment is
the part of the URL that comes after #. Do not include the #.
:type custom_fragment: str
:param custom_query_string: The set of query strings to be placed in the
redirect URL. Setting this value would replace any existing query string;
leave empty to preserve the incoming query string. Query string must be in
<key>=<value> format. The first ? and & will be added automatically so do
not include them in the front, but do separate multiple query strings with
&.
:type custom_query_string: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
'redirect_type': {'key': 'redirectType', 'type': 'str'},
'redirect_protocol': {'key': 'redirectProtocol', 'type': 'str'},
'custom_host': {'key': 'customHost', 'type': 'str'},
'custom_path': {'key': 'customPath', 'type': 'str'},
'custom_fragment': {'key': 'customFragment', 'type': 'str'},
'custom_query_string': {'key': 'customQueryString', 'type': 'str'},
}
def __init__(self, *, redirect_type=None, redirect_protocol=None, custom_host: str=None, custom_path: str=None, custom_fragment: str=None, custom_query_string: str=None, **kwargs) -> None:
super(RedirectConfiguration, self).__init__(**kwargs)
self.redirect_type = redirect_type
self.redirect_protocol = redirect_protocol
self.custom_host = custom_host
self.custom_path = custom_path
self.custom_fragment = custom_fragment
self.custom_query_string = custom_query_string
self.odatatype = '#Microsoft.Azure.FrontDoor.Models.FrontdoorRedirectConfiguration'
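# Illustrative usage sketch - the keyword values below are assumptions, not
# taken from the service definition:
#   redirect = RedirectConfiguration(
#       redirect_type='PermanentRedirect',
#       redirect_protocol='HttpsOnly',
#       custom_host='contoso.example.net',
#       custom_query_string='a=b&x=y')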
```
#### File: tests/latest/test_snapshot_commands_ext.py
```python
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
POOL_DEFAULT = "--service-level 'Premium' --size 4398046511104"
VOLUME_DEFAULT = "--service-level 'Premium' --usage-threshold 107374182400"
# No tidy up of tests required. The resource group is automatically removed
class AzureNetAppFilesExtSnapshotServiceScenarioTest(ScenarioTest):
def setup_vnet(self, rg, vnet_name, subnet_name):
self.cmd("az network vnet create -n %s --resource-group %s -l westus2 --address-prefix 10.12.0.0/16" % (vnet_name, rg))
self.cmd("az network vnet subnet create -n %s --vnet-name %s --address-prefixes '10.12.0.0/24' --delegations 'Microsoft.Netapp/volumes' -g %s" % (subnet_name, vnet_name, rg))
def current_subscription(self):
subs = self.cmd("az account show").get_output_in_json()
return subs['id']
def create_volume(self, account_name, pool_name, volume_name1, rg, tags=None):
vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
creation_token = volume_name1
subnet_name = self.create_random_name(prefix='cli-subnet-', length=16)
subnet_id = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s" % (self.current_subscription(), rg, vnet_name, subnet_name)
tag = "--tags '%s'" % tags if tags is not None else ""
self.setup_vnet(rg, vnet_name, subnet_name)
self.cmd("netappfiles account create -g %s -a '%s' -l 'westus2'" % (rg, account_name)).get_output_in_json()
self.cmd("netappfiles pool create -g %s -a %s -p %s -l 'westus2' %s %s" % (rg, account_name, pool_name, POOL_DEFAULT, tag)).get_output_in_json()
volume1 = self.cmd("netappfiles volume create --resource-group %s --account-name %s --pool-name %s --volume-name %s -l 'westus2' %s --creation-token %s --subnet-id %s %s" % (rg, account_name, pool_name, volume_name1, VOLUME_DEFAULT, creation_token, subnet_id, tag)).get_output_in_json()
return volume1
@ResourceGroupPreparer()
def test_ext_create_delete_snapshots(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name = self.create_random_name(prefix='cli-sn-', length=24)
rg = '{rg}'
volume = self.create_volume(account_name, pool_name, volume_name, rg)
snapshot = self.cmd("az netappfiles snapshot create -g %s -a %s -p %s -v %s -s %s -l 'westus2' --file-system-id %s" % (rg, account_name, pool_name, volume_name, snapshot_name, volume['fileSystemId'])).get_output_in_json()
assert snapshot['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
snapshot_list = self.cmd("az netappfiles snapshot list --resource-group %s --account-name %s --pool-name %s --volume-name %s" % (rg, account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 1
self.cmd("az netappfiles snapshot delete -g %s -a %s -p %s -v %s -s %s" % (rg, account_name, pool_name, volume_name, snapshot_name))
snapshot_list = self.cmd("az netappfiles snapshot list --resource-group %s --account-name %s --pool-name %s --volume-name %s" % (rg, account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 0
@ResourceGroupPreparer()
def test_ext_list_snapshots(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name1 = self.create_random_name(prefix='cli-sn-', length=24)
snapshot_name2 = self.create_random_name(prefix='cli-sn-', length=24)
volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l 'westus2' --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name1, volume['fileSystemId'])).get_output_in_json()
self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l 'westus2' --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name2, volume['fileSystemId'])).get_output_in_json()
snapshot_list = self.cmd("az netappfiles snapshot list -g {rg} -a %s -p %s -v %s" % (account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 2
@ResourceGroupPreparer()
def test_ext_get_snapshot(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name = self.create_random_name(prefix='cli-sn-', length=24)
volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
snapshot = self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l 'westus2' --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name, volume['fileSystemId'])).get_output_in_json()
snapshot = self.cmd("az netappfiles snapshot show -g {rg} -a %s -p %s -v %s -s %s" % (account_name, pool_name, volume_name, snapshot_name)).get_output_in_json()
assert snapshot['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
snapshot_from_id = self.cmd("az netappfiles snapshot show --ids %s" % snapshot['id']).get_output_in_json()
assert snapshot_from_id['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
``` |
{
"source": "jmartens/HypChat",
"score": 3
} |
#### File: HypChat/tests/common.py
```python
import unittest
import sys
import os, os.path
import ConfigParser
package = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, package)
import hypchat
class TestHypChat(unittest.TestCase):
def setUp(self):
self.setUpConfig()
self.setUpHipChat()
def setUpConfig(self):
search_paths = [os.path.expanduser('~/.hypchat'), '/etc/hypchat']
self.config = ConfigParser.ConfigParser()
self.config.read(search_paths)
if self.config.has_section('HipChat'):
self.access_token = self.config.get('HipChat', 'token')
elif 'HIPCHAT_TOKEN' in os.environ:
self.access_token = os.environ['HIPCHAT_TOKEN']
else:
print('Authorization token not detected! The token is pulled from ' \
'~/.hypchat, /etc/hypchat, or the environment variable HIPCHAT_TOKEN.')
def setUpHipChat(self):
self.hipchat = hypchat.HypChat(self.access_token)
```
#### File: HypChat/tests/test_rate_limit.py
```python
import unittest
import time
import requests_mock
from common import TestHypChat
class TestRateLimit(TestHypChat):
def setUp(self):
super(TestRateLimit, self).setUp()
self.call_counter = 1
def successfull_callback(self, request, context):
context.status_code = 200
headers = {
'X-Ratelimit-Limit': '100',
'X-Ratelimit-Remaining': '99',
'X-Ratelimit-Reset': str(time.time())
}
context.headers = headers
payload = {
'atlassian_id': None,
'created': '2014-09-26T10:44:04+00:00',
'email': '<EMAIL>',
'group': None,
'id': 12345,
'is_deleted': False,
'is_group_admin': True,
'is_guest': False,
'last_active': str(int(time.time())),
'links': {
'self': 'https://api.hipchat.com/v2/user/12345'
},
'mention_name': 'john',
'name': '<NAME>',
'photo_url': 'https://s3.amazonaws.com/uploads.hipchat.com/photos/12345/ABCDEFG.jpg',
'presence': {
'client': {
'type': 'http://hipchat.com/client/mac',
'version': '12345'
},
'is_online': True
},
'timezone': 'Europe/Berlin',
'title': 'Software Engineer',
'xmpp_jid': '<EMAIL>'
}
return payload
def rate_limited_callback(self, request, context):
context.status_code = 429
headers = {
'X-Ratelimit-Limit': '100',
'X-Ratelimit-Remaining': '0',
'X-Ratelimit-Reset': str(time.time() + 0.1)
}
context.headers = headers
payload = {
'error': {
'code': 429,
'type': 'Too Many Requests',
'message': 'Rate Limit exceeded'
}
}
return payload
def hipchat_callback(self, request, context):
rate_limit = self.call_counter % 2 == 0
self.call_counter += 1
if rate_limit:
return self.rate_limited_callback(request, context)
else:
return self.successfull_callback(request, context)
def runTest(self):
"""
We are mocking the request so every second call is rate limited.
"""
with requests_mock.Mocker() as m:
m.register_uri('GET', 'https://api.hipchat.com/v2/user/@john', status_code=200, json=self.hipchat_callback)
for i in xrange(3):
self.hipchat.get_user('@john')
``` |
{
"source": "jmartens/PandasGUI",
"score": 3
} |
#### File: PandasGUI/pandasgui/automatic_profiling.py
```python
def enable_profiling(glob_call_time_stack=[]):
import sys
import inspect
import os
import time
def tracefunc(frame, event, arg,
indent=[0], filename=__file__, call_time_stack=glob_call_time_stack, last_line_num=[None],
last_line_start=[None]):
'''
frame: See frame in table https://docs.python.org/3/library/inspect.html#types-and-members
event: https://docs.python.org/3/library/inspect.html#types-and-members
arg: For 'return' event, arg is return value.
For 'exception' event, arg is (exception, value, traceback).
Otherwise arg is None
'''
global TRACEFUNC_PROJECT_FILES
if 'TRACEFUNC_PROJECT_FILES' not in globals():
caller_file = inspect.currentframe().f_code.co_filename
dirname = os.path.dirname(caller_file)
# Find project root path
while True:
if '__init__.py' in os.listdir(dirname):
project_root = dirname
break
elif os.path.dirname(dirname) == dirname:
# Got to drive root without __init__ file so project root is just the folder containing the caller file
project_root = os.path.dirname(caller_file)
break
dirname = os.path.dirname(dirname)
TRACEFUNC_PROJECT_FILES = []
for root, subFolder, files in os.walk(project_root):
for item in files:
if item.endswith(".py"):
path = str(os.path.join(root, item)).replace('\\', '/')
TRACEFUNC_PROJECT_FILES.append(path)
line = frame.f_lineno
file_path = frame.f_code.co_filename
file_name = os.path.basename(file_path)
object_name = frame.f_code.co_name
if file_path in TRACEFUNC_PROJECT_FILES:
if event == "call":
call_time_stack.append(time.time())
indent[0] += 3
print("-" * indent[0] + f"> call function {frame.f_code.co_name} [Line {frame.f_lineno}]")
elif event == "return":
print("<" + "-" * indent[0], "exit function", frame.f_code.co_name,
f"({time.time() - call_time_stack.pop():.2f}s)")
indent[0] -= 3
if event == 'line':
this_line_start = time.time()
this_line_num = frame.f_lineno
return tracefunc
sys.settrace(tracefunc)
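# Illustrative usage sketch: call enable_profiling() once near program start-up;
# every subsequent call into .py files under the project root is then printed
# with indentation and per-call timing. `main` is a placeholder entry point.
#   if __name__ == '__main__':
#       enable_profiling()
#       main()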
```
#### File: pandasgui/widgets/dialogs.py
```python
import sys
from PyQt5 import QtWidgets
import logging
logger = logging.getLogger(__name__)
class InputDialog(QtWidgets.QDialog):
"""
Example usage:
dialog = InputDialog("Add Column", {"Column Title": str, "Column Formula": str})
column_title, column_formula = dialog.getInputs()
"""
def __init__(self, title, schema, parent=None):
super().__init__(parent)
layout = QtWidgets.QFormLayout(self)
self.fields = []
for field_name, field_type in schema.items():
if field_type == str:
widget = QtWidgets.QLineEdit(self)
if field_type == bool:
pass # TODO
self.fields.append(widget)
layout.addRow(field_name, widget)
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel, self)
layout.addWidget(buttonBox)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
def getInputs(self):
return tuple(field.text() for field in self.fields)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
dialog = InputDialog("Add Column", {"Column Title": str, "Column Formula": str})
if dialog.exec():
print(dialog.getInputs())
exit(0)
``` |