/Keras_Applications_3D-0.1.0-py3-none-any.whl/keras_applications_3d/applications/inception_resnet_v2.py
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_URL = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/inception_resnet_v2/')
layers = None
@keras_export('keras.applications.inception_resnet_v2.InceptionResNetV2',
'keras.applications.InceptionResNetV2')
def InceptionResNetV2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the Inception-ResNet v2 architecture.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For InceptionResNetV2, call
`tf.keras.applications.inception_resnet_v2.preprocess_input`
on your inputs before passing them to the model.
`inception_resnet_v2.preprocess_input`
will scale input pixels between -1 and 1.
Args:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is `False` (otherwise the input shape
      has to be `(299, 299, 3)` (with `'channels_last'` data format)
      or `(3, 299, 299)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `'avg'` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `'max'` means that global max pooling will be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is `True`, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Stem block: 35 x 35 x 192
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
x = conv2d_bn(x, 32, 3, padding='valid')
x = conv2d_bn(x, 64, 3)
x = layers.MaxPooling2D(3, strides=2)(x)
x = conv2d_bn(x, 80, 1, padding='valid')
x = conv2d_bn(x, 192, 3, padding='valid')
x = layers.MaxPooling2D(3, strides=2)(x)
# Mixed 5b (Inception-A block): 35 x 35 x 320
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
for block_idx in range(1, 11):
x = inception_resnet_block(
x, scale=0.17, block_type='block35', block_idx=block_idx)
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
for block_idx in range(1, 21):
x = inception_resnet_block(
x, scale=0.1, block_type='block17', block_idx=block_idx)
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 10):
x = inception_resnet_block(
x, scale=0.2, block_type='block8', block_idx=block_idx)
x = inception_resnet_block(
x, scale=1., activation=None, block_type='block8', block_idx=10)
# Final convolution block: 8 x 8 x 1536
x = conv2d_bn(x, 1536, 1, name='conv_7b')
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='inception_resnet_v2')
# Load weights.
if weights == 'imagenet':
if include_top:
fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='e693bd0210a403b3192acc6073ad2e96')
else:
fname = ('inception_resnet_v2_weights_'
'tf_dim_ordering_tf_kernels_notop.h5')
weights_path = data_utils.get_file(
fname,
BASE_WEIGHT_URL + fname,
cache_subdir='models',
file_hash='d19885ff4a710c122648d3b5c3b684e4')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def conv2d_bn(x,
filters,
kernel_size,
strides=1,
padding='same',
activation='relu',
use_bias=False,
name=None):
"""Utility function to apply conv + BN.
Args:
x: input tensor.
filters: filters in `Conv2D`.
kernel_size: kernel size as in `Conv2D`.
strides: strides in `Conv2D`.
padding: padding mode in `Conv2D`.
activation: activation in `Conv2D`.
use_bias: whether to use a bias in `Conv2D`.
name: name of the ops; will become `name + '_ac'` for the activation
and `name + '_bn'` for the batch norm layer.
Returns:
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
x = layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
use_bias=use_bias,
name=name)(
x)
if not use_bias:
bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3
bn_name = None if name is None else name + '_bn'
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
if activation is not None:
ac_name = None if name is None else name + '_ac'
x = layers.Activation(activation, name=ac_name)(x)
return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
"""Adds an Inception-ResNet block.
This function builds 3 types of Inception-ResNet blocks mentioned
in the paper, controlled by the `block_type` argument (which is the
block name used in the official TF-slim implementation):
- Inception-ResNet-A: `block_type='block35'`
- Inception-ResNet-B: `block_type='block17'`
- Inception-ResNet-C: `block_type='block8'`
Args:
x: input tensor.
scale: scaling factor to scale the residuals (i.e., the output of passing
`x` through an inception module) before adding them to the shortcut
branch. Let `r` be the output from the residual branch, the output of this
block will be `x + scale * r`.
block_type: `'block35'`, `'block17'` or `'block8'`, determines the network
structure in the residual branch.
block_idx: an `int` used for generating layer names. The Inception-ResNet
blocks are repeated many times in this network. We use `block_idx` to
identify each of the repetitions. For example, the first
Inception-ResNet-A block will have `block_type='block35', block_idx=0`,
and the layer names will have a common prefix `'block35_0'`.
activation: activation function to use at the end of the block (see
[activations](../activations.md)). When `activation=None`, no activation
is applied
(i.e., "linear" activation: `a(x) = x`).
Returns:
Output tensor for the block.
Raises:
ValueError: if `block_type` is not one of `'block35'`,
`'block17'` or `'block8'`.
"""
if block_type == 'block35':
branch_0 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(x, 32, 1)
branch_1 = conv2d_bn(branch_1, 32, 3)
branch_2 = conv2d_bn(x, 32, 1)
branch_2 = conv2d_bn(branch_2, 48, 3)
branch_2 = conv2d_bn(branch_2, 64, 3)
branches = [branch_0, branch_1, branch_2]
elif block_type == 'block17':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 128, 1)
branch_1 = conv2d_bn(branch_1, 160, [1, 7])
branch_1 = conv2d_bn(branch_1, 192, [7, 1])
branches = [branch_0, branch_1]
elif block_type == 'block8':
branch_0 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(x, 192, 1)
branch_1 = conv2d_bn(branch_1, 224, [1, 3])
branch_1 = conv2d_bn(branch_1, 256, [3, 1])
branches = [branch_0, branch_1]
else:
raise ValueError('Unknown Inception-ResNet block type. '
'Expects "block35", "block17" or "block8", '
'but got: ' + str(block_type))
block_name = block_type + '_' + str(block_idx)
channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
mixed = layers.Concatenate(
axis=channel_axis, name=block_name + '_mixed')(
branches)
up = conv2d_bn(
mixed,
backend.int_shape(x)[channel_axis],
1,
activation=None,
use_bias=True,
name=block_name + '_conv')
x = layers.Lambda(
lambda inputs, scale: inputs[0] + inputs[1] * scale,
output_shape=backend.int_shape(x)[1:],
arguments={'scale': scale},
name=block_name)([x, up])
if activation is not None:
x = layers.Activation(activation, name=block_name + '_ac')(x)
return x
@keras_export('keras.applications.inception_resnet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.inception_resnet_v2.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
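if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module). It assumes a
  # TensorFlow install whose private `tensorflow.python.keras` paths match the
  # imports above and that the pretrained ImageNet weights can be downloaded.
  import numpy as np
  model = InceptionResNetV2(weights='imagenet')
  dummy = np.random.randint(0, 256, size=(1, 299, 299, 3)).astype('float32')
  batch = preprocess_input(dummy)  # scales pixels to the expected [-1, 1] range
  preds = model.predict(batch)
  print(decode_predictions(preds, top=3))  # [[(class_id, class_name, score), ...]]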
/MPInterfaces_Latest-2.0.3.tar.gz/MPInterfaces_Latest-2.0.3/mpinterfaces/mat2d/electronic_structure/startup.py
from __future__ import print_function, division, unicode_literals
import itertools
import math
import numpy as np
import os
import shutil
import subprocess
from pymatgen import Structure
from pymatgen.io.vasp.inputs import Kpoints, Incar
from pymatgen.symmetry.bandstructure import HighSymmKpath
from mpinterfaces.mat2d import VASP_STD_BIN, QUEUE_SYSTEM
from mpinterfaces.mat2d.stability import relax
from mpinterfaces.utils import write_pbs_runjob, get_markovian_path,\
write_slurm_runjob, is_converged, get_magmom_string, remove_z_kpoints
__author__ = "Michael Ashton, Joshua J. Gabriel"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton, Joshua J. Gabriel"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "March 3, 2017"
# TODO: document functions args and returns properly
# TODO: the run_* functions must be refactored to reduce code duplication
def run_pbe_calculation(dim=2, submit=True, force_overwrite=False):
"""
Setup and submit a normal PBE calculation for band structure along
high symmetry k-paths.
Args:
dim (int): 2 for relaxing a 2D material, 3 for a 3D material.
submit (bool): Whether or not to submit the job.
force_overwrite (bool): Whether or not to overwrite files
if an already converged vasprun.xml exists in the
directory.
"""
PBE_INCAR_DICT = {'EDIFF': 1e-6, 'IBRION': 2, 'ICHARG': 2, 'ISIF': 3,
'ISMEAR': 1, 'NSW': 0, 'LVTOT': True, 'LVHAR': True,
'LORBIT': 1, 'LREAL': 'Auto', 'NPAR': 4,
'PREC': 'Accurate', 'LWAVE': True, 'SIGMA': 0.1,
'ENCUT': 500, 'ISPIN': 2}
directory = os.path.basename(os.getcwd())
if not os.path.isdir('pbe_bands'):
os.mkdir('pbe_bands')
if force_overwrite or not is_converged('pbe_bands'):
shutil.copy("CONTCAR", "pbe_bands/POSCAR")
structure = Structure.from_file("pbe_bands/POSCAR")
if os.path.isfile("POTCAR"):
shutil.copy("POTCAR", "pbe_bands")
shutil.copy("CHGCAR", "pbe_bands")
PBE_INCAR_DICT.update(
{'MAGMOM': get_magmom_string(structure)})
Incar.from_dict(PBE_INCAR_DICT).write_file('pbe_bands/INCAR')
os.chdir('pbe_bands')
write_band_structure_kpoints(structure, dim=dim)
if QUEUE_SYSTEM == 'pbs':
write_pbs_runjob(directory, 1, 16, '800mb', '6:00:00', VASP_STD_BIN)
submission_command = 'qsub runjob'
elif QUEUE_SYSTEM == 'slurm':
write_slurm_runjob(directory, 16, '800mb', '6:00:00', VASP_STD_BIN)
submission_command = 'sbatch runjob'
if submit:
_ = subprocess.check_output(submission_command.split())
os.chdir('../')
def run_hse_prep_calculation(dim=2, submit=True):
"""
Submits a quick static calculation to calculate the IBZKPT
file using a smaller number of k-points (200/atom instead of
1000/atom). The other outputs from this calculation are
essentially useless.
Args:
dim (int): 2 for relaxing a 2D material, 3 for a 3D material.
submit (bool): Whether or not to submit the job.
"""
if not os.path.isdir('hse_prep'):
os.mkdir('hse_prep')
os.chdir('hse_prep')
shutil.copy('../CONTCAR', 'POSCAR')
if os.path.isfile('../POTCAR'):
shutil.copy('../POTCAR', '.')
relax(dim=2, submit=False)
incar_dict = Incar.from_file('INCAR').as_dict()
incar_dict.update({'NSW': 0, 'NELM': 1, 'LWAVE': False, 'LCHARG': False,
'LAECHG': False})
Incar.from_dict(incar_dict).write_file('INCAR')
Kpoints.automatic_density(
Structure.from_file('POSCAR'), 200).write_file('KPOINTS')
if dim == 2:
kpts_lines = open('KPOINTS').readlines()
with open('KPOINTS', 'w') as kpts:
for line in kpts_lines[:3]:
kpts.write(line)
kpts.write(kpts_lines[3].split()[0] + ' '
+ kpts_lines[3].split()[1] + ' 1')
if QUEUE_SYSTEM == 'pbs':
write_pbs_runjob('{}_prep'.format(
os.getcwd().split('/')[-2]), 1, 16, '800mb', '6:00:00', VASP_STD_BIN)
submission_command = 'qsub runjob'
elif QUEUE_SYSTEM == 'slurm':
write_slurm_runjob('{}_prep'.format(
os.getcwd().split('/')[-2]), 16, '800mb', '6:00:00', VASP_STD_BIN)
submission_command = 'sbatch runjob'
if submit:
_ = subprocess.check_output(submission_command.split())
os.chdir('../')
def run_hse_calculation(dim=2, submit=True, force_overwrite=False):
"""
Setup/submit an HSE06 calculation to get an accurate band structure.
See http://cms.mpi.univie.ac.at/wiki/index.php/Si_bandstructure for
more details.
Args:
dim (int): 2 for relaxing a 2D material, 3 for a 3D material.
submit (bool): Whether or not to submit the job.
force_overwrite (bool): Whether or not to overwrite files
if an already converged vasprun.xml exists in the
directory.
"""
HSE_INCAR_DICT = {'LHFCALC': True, 'HFSCREEN': 0.2, 'AEXX': 0.25,
'ALGO': 'D', 'TIME': 0.4, 'NSW': 0,
'LVTOT': True, 'LVHAR': True, 'LORBIT': 11,
'LWAVE': True, 'NPAR': 8, 'PREC': 'Accurate',
'EDIFF': 1e-4, 'ENCUT': 450, 'ICHARG': 2, 'ISMEAR': 1,
'SIGMA': 0.1, 'IBRION': 2, 'ISIF': 3, 'ISPIN': 2}
if not os.path.isdir('hse_bands'):
os.mkdir('hse_bands')
if force_overwrite or not is_converged('hse_bands'):
os.chdir('hse_bands')
os.system('cp ../CONTCAR ./POSCAR')
structure = Structure.from_file("POSCAR")
if os.path.isfile('../POTCAR'):
os.system('cp ../POTCAR .')
HSE_INCAR_DICT.update(
{'MAGMOM': get_magmom_string(structure)}
)
Incar.from_dict(HSE_INCAR_DICT).write_file('INCAR')
# Re-use the irreducible brillouin zone KPOINTS from a
# previous standard DFT run.
write_band_structure_kpoints(structure, dim=dim)
if QUEUE_SYSTEM == 'pbs':
write_pbs_runjob('{}_hsebands'.format(
os.getcwd().split('/')[-2]), 2, 64, '1800mb', '50:00:00', VASP_STD_BIN)
submission_command = 'qsub runjob'
elif QUEUE_SYSTEM == 'slurm':
write_slurm_runjob('{}_hsebands'.format(
os.getcwd().split('/')[-2]), 64, '1800mb', '50:00:00', VASP_STD_BIN)
submission_command = 'sbatch runjob'
if submit:
_ = subprocess.check_output(submission_command.split())
os.chdir('../')
def write_band_structure_kpoints(structure, n_kpts=20, dim=2,
ibzkpt_path="../"):
"""
Writes a KPOINTS file for band structure calculations. Does
not use the typical linemode syntax for NSCF calculations,
but uses the IBZKPT + high-symmetry path syntax described in
http://cms.mpi.univie.ac.at/wiki/index.php/Si_bandstructure
so that SCF calculations can be performed. This is more
reliable than re-using the CHGCAR from a previous run, which
often results in "dimensions on the CHGCAR are different"
errors in VASP.
Args:
structure (Structure): structure for determining k-path
n_kpts (int): number of divisions along high-symmetry lines
dim (int): 2 for a 2D material, 3 for a 3D material.
ibzkpt_path (str): location of IBZKPT file. Defaults to one
directory up.
"""
ibz_lines = open(os.path.join(ibzkpt_path, "IBZKPT")).readlines()
for i, line in enumerate(ibz_lines):
if "Tetrahedra" in line:
ibz_lines = ibz_lines[:i]
break
n_ibz_kpts = int(ibz_lines[1].split()[0])
kpath = HighSymmKpath(structure)
Kpoints.automatic_linemode(n_kpts, kpath).write_file('KPOINTS')
if dim == 2:
remove_z_kpoints()
linemode_lines = open('KPOINTS').readlines()
abs_path = []
i = 4
while i < len(linemode_lines):
start_kpt = linemode_lines[i].split()
end_kpt = linemode_lines[i+1].split()
increments = [
(float(end_kpt[0]) - float(start_kpt[0])) / 20,
(float(end_kpt[1]) - float(start_kpt[1])) / 20,
(float(end_kpt[2]) - float(start_kpt[2])) / 20
]
abs_path.append(start_kpt[:3] + ['0', start_kpt[4]])
for n in range(1, 20):
abs_path.append(
[str(float(start_kpt[0]) + increments[0] * n),
str(float(start_kpt[1]) + increments[1] * n),
str(float(start_kpt[2]) + increments[2] * n), '0']
)
abs_path.append(end_kpt[:3] + ['0', end_kpt[4]])
i += 3
n_linemode_kpts = len(abs_path)
with open('KPOINTS', 'w') as kpts:
kpts.write('Automatically generated mesh\n')
kpts.write('{}\n'.format(n_ibz_kpts + n_linemode_kpts))
kpts.write('Reciprocal Lattice\n')
for line in ibz_lines[3:]:
kpts.write(line)
for point in abs_path:
kpts.write('{}\n'.format(' '.join(point)))
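# Layout of the KPOINTS file written above (illustrative sketch, the numeric
# values are made up; labels such as \Gamma come from the linemode k-path):
#
#   Automatically generated mesh
#   <n_ibz_kpts + n_linemode_kpts>
#   Reciprocal Lattice
#   0.000000  0.000000  0.000000  8          <- weighted points copied from IBZKPT
#   ...
#   0.000000  0.000000  0.000000  0 \Gamma   <- zero-weight band-structure path points
#   ...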
def get_2D_hse_kpoints(struct_for_path, ibzkpth):
"""
Args:
struct_for_path: Structure from which linemode k-points will
be generated.
ibzkpth:
Returns:
the Kpoints file object in the form of a string
ready for execution by MPInterfaces
calibrate objects
"""
# Read IBZKPT from prep step
ibz_lines = open(ibzkpth).readlines()
n_ibz_kpts = int(ibz_lines[1].split()[0])
# Read linemode KPOINTs from the dict (makes sure it is Kpoints
# file with only 20 per atom for the optimized settings
# Kpoints.from_dict(kpoint_dict).write_file('linemode_KPOINTS')
kpath = HighSymmKpath(struct_for_path)
Kpoints.automatic_linemode(20, kpath).write_file('KPOINTS_linemode')
remove_z_kpoints_linemode()
linemode_lines = open('KPOINTS_linemode').readlines()
# put them together
abs_path = []
for i in range(4, len(linemode_lines), 3):
start_kpt = linemode_lines[i].split()
end_kpt = linemode_lines[i+1].split()
increments = [
(float(end_kpt[0]) - float(start_kpt[0])) / 20,
(float(end_kpt[1]) - float(start_kpt[1])) / 20,
(float(end_kpt[2]) - float(start_kpt[2])) / 20
]
abs_path.append(start_kpt[:3] + ['0', start_kpt[4]])
for n in range(1, 20):
abs_path.append(
[str(float(start_kpt[0]) + increments[0] * n),
str(float(start_kpt[1]) + increments[1] * n),
str(float(start_kpt[2]) + increments[2] * n), '0']
)
abs_path.append(end_kpt[:3] + ['0', end_kpt[4]])
n_linemode_kpts = len(abs_path)
# write out the kpoints file and return the object
Kpoints_hse_file = '\n'.join(
['Automatically generated mesh',
'{}'.format(n_ibz_kpts + n_linemode_kpts),
'Reciprocal Lattice',
'{}'.format(str(''.join([line for line in ibz_lines[3:]])))]) + \
'{}'.format(str('\n'.join(
[' '.join(point) for point in abs_path])))
## can be used for test print out
# with open('KPOINTS_HSE', 'w') as kpts:
# kpts.write('Automatically generated mesh\n')
# kpts.write('{}\n'.format(n_ibz_kpts + n_linemode_kpts))
# kpts.write('Reciprocal Lattice\n')
# for line in ibz_lines[3:]:
# kpts.write(line)
# for point in abs_path:
# kpts.write('{}\n'.format(' '.join(point)))
return Kpoints_hse_file
def get_2D_incar_hse_prep(incar_dict):
"""
linker for prep calculation
Args:
incar_dict (dict)
Returns:
dict: incar dict
"""
print('updating INCAR for prep calculation ')
INCAR_PREP = {'NSW': 0,
'NELM': 1,
'LWAVE': False,
'LCHARG': False,
'LAECHG': False}
incar_dict.update(INCAR_PREP)
return incar_dict
def get_2D_incar_hse(incar_dict):
"""
linker function to complete the HSE input deck to MPInterfaces
Args:
incar_dict (dict)
Returns:
dict: incar dict
"""
HSE_INCAR_DICT = {'LHFCALC': True, 'HFSCREEN': 0.2, 'AEXX': 0.25,
'ALGO': 'D', 'TIME': 0.4, 'NSW': 0, 'NELM': 75,
'LVTOT': True, 'LVHAR': True, 'LORBIT': 11,
'LWAVE': False, 'NPAR': 8, 'PREC': 'Accurate',
'EDIFF': 1e-4, 'ENCUT': 450, 'ICHARG': 2, 'ISMEAR': 1,
'SIGMA': 0.1, 'IBRION': 2, 'ISIF': 3, 'ISPIN': 2}
incar_dict.update(HSE_INCAR_DICT)
    return incar_dict
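if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). The starting INCAR
    # values below are made up for illustration; the two linker functions simply
    # layer the prep / HSE settings on top of whatever dict they are given.
    base_incar = {'ENCUT': 500, 'EDIFF': 1e-6, 'ISPIN': 2}
    print(get_2D_incar_hse_prep(dict(base_incar)))
    print(get_2D_incar_hse(dict(base_incar)))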
/CodeComb-0.1.9-py3-none-any.whl/CodeComb_Core/make_code_corpus.py
import re
import os
import pandas as pd
import sys
import pickle
import configparser
from CodeComb_Core.embeddings import *
from CodeComb_Core.utils import *
from CodeComb_Core.env import *
## Read the file and pack it into file_info dict
def prepare_file(filename, location):
with open(os.path.join(location,filename), "r", encoding = 'latin-1') as fp:
file_data = fp.read()
#file_data = process_text(file_data)
file_info = dict()
file_info["body"] = file_data
file_info["name"] = filename
file_info["location"] = location
file_info["ext"] = filename[filename.rfind('.')+1:]
return file_info
# From file metas read all file content of supported formats
def read_all_supported_files(file_metas):
print ("READING FILES")
print ("Total files-{}".format(len(file_metas)))
file_contents = []
for i, info in enumerate(file_metas):
path_to_file = info['location']
file_name = info['name']
if ((i % 100) == 0):
print ("Processed {} files".format(i-1))
content = prepare_file(file_name, path_to_file)
file_contents.append(content)
return file_contents
def test_read_all_supported_files():
path = os.getcwd()
print ("TEST READ ALL CPP FILES")
files = get_supported_files_name_location(path)
file_contents = read_all_supported_files(files)
print (file_contents)
def test_prepare_file():
location = os.getcwd()
filename = "sample_cpp.cpp"
file_info = prepare_file(filename, location)
print ("TEST PREPARE FILE")
print (file_info)
return
## Load Formats from the config file
def load_format():
config = configparser.ConfigParser()
config.read(os.path.join(os.path.expanduser("~"), "codecomb_config.ini"))
fmts = list(config['FORMAT'].values())
formats_list = ["."+fmt for fmt in fmts]
return formats_list
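# Example of the expected ~/codecomb_config.ini (illustrative; the keys are
# arbitrary, only the values are used, and each value is an extension without
# the leading dot, so this example yields ['.py', '.cpp', '.h']):
#
#   [FORMAT]
#   python = py
#   cpp = cpp
#   header = h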
# Get Metas
def get_supported_files_name_location(path):
files = []
# r=root, d=directories, f = files
for r, _, f in os.walk(path):
for file in f:
formats = load_format()
for fmt in formats:
if file.endswith(fmt):
info = {}
info['name'] = file
info['location'] = r
files.append(info)
return files
def test_get_supported_files_name_location():
print ("TEST GET SUPPORTED FILES NAME LOCATION")
path = "."
file_infos = get_supported_files_name_location(path)
print (file_infos)
print (len(file_infos))
## Recursively reads all supported file names from current locations
## And forms a DF
def get_all_files_currentdir(pickle_file):
PATH = os.getcwd()
files = get_supported_files_name_location(PATH)
file_contents = read_all_supported_files(files)
df_corpus = pd.DataFrame(file_contents)
logging.info (df_corpus[['body', 'location']].head())
logging.info (df_corpus.columns)
logging.info (df_corpus.describe())
with open(os.path.join(DATA_PATH, pickle_file),"wb") as fp:
pickle.dump(df_corpus, fp)
def test_all_files_currentdir():
print ("TEST ALL FILES CURRENTDIR")
get_all_files_currentdir("tmp.pkl")
## Given a df file source, it makes word-embeddings and document vectors out of it
def embed_df_corpus(df_file):
print (20*"=" + "Initializing Corpus embeddings" + 20*"=")
with open(os.path.join(DATA_PATH, df_file), "rb") as dfPickled:
dfCorpus = pickle.load(dfPickled)
dfCorpus["corpus"] = dfCorpus["name"] + " " + dfCorpus["location"] + " " + dfCorpus["body"]
X = dfCorpus["corpus"].tolist()
X_cleaned = [process_text(X_i) for X_i in X]
## Create word embedding
emb = make_word_embedding(X_cleaned)
## Create Document vectors of entire corpus
embed_corpus(X_cleaned, emb)
print (20*"=" + "Corpus Embeddings done" + 20*"=")
def test_embed_df_corpus():
df_file = "df_corpus.pkl"
embed_df_corpus(df_file)
## Initializes / Indexes the corpus in two steps
## Gets all metas and body of the corpus
def init_corpus(df_file="df_corpus.pkl", debug=False):
## Set debug mode
if debug:
logging.basicConfig(level=logging.INFO)
logging.info("Debug mode on")
## Ensure all output dirs in place
init_path()
print (20*"#" + "prepping the corpus" + 20*"#")
t = time()
get_all_files_currentdir(df_file)
embed_df_corpus(df_file)
print('Time taken to init: {} mins'.format(round((time() - t) / 60, 2)))
if __name__ == "__main__":
## Uncomment below to test
# test_prepare_file()
# test_get_cpp_files_name_location()
# test_read_all_cpp_files()
# test_all_files_currentdir()
#test_embed_df_corpus()
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
    init_corpus()
/GooseSLURM-0.12.4.tar.gz/GooseSLURM-0.12.4/docs/cheat-sheet.rst
***********
Cheat-sheet
***********
SLURM commands
--------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``sbatch "SLURM-file"`` submit a job, controlled using the script in ``"SLURM-file"``
------------------------- -------------------------------------------------------------------------------------------------------
``scancel "job-id"`` delete the job with identifier ``"job-id"``
------------------------- -------------------------------------------------------------------------------------------------------
``squeue`` list basic information of all jobs
------------------------- -------------------------------------------------------------------------------------------------------
``sinfo`` list basic information of all nodes
------------------------- -------------------------------------------------------------------------------------------------------
``scontrol`` view or modify Slurm configuration and state
------------------------- -------------------------------------------------------------------------------------------------------
``sacct``                 pull up status information about past jobs
========================= =======================================================================================================
GooseSLURM wrapper functions
----------------------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``Gsub [...]`` submit a (batch of) job(s), controlled using the provided scripts
------------------------- -------------------------------------------------------------------------------------------------------
``Gdel [...]`` delete a (batch of) job(s)
------------------------- -------------------------------------------------------------------------------------------------------
``Gstat`` list basic information of jobs
------------------------- -------------------------------------------------------------------------------------------------------
``Ginfo`` list basic information of all nodes
------------------------- -------------------------------------------------------------------------------------------------------
``Gps`` list basic information of all running processes (on the system that you are logged onto)
========================= =======================================================================================================
See :ref:`sec-scripts`
Monitor processes and resources
-------------------------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``top`` live-monitor of current running processes
------------------------- -------------------------------------------------------------------------------------------------------
``ps`` show snap-shot of processes
------------------------- -------------------------------------------------------------------------------------------------------
``ps -aux`` show snap-shot of all processes
------------------------- -------------------------------------------------------------------------------------------------------
``du -h`` size of directories
------------------------- -------------------------------------------------------------------------------------------------------
``df -h`` total, used, and available disk-space
========================= =======================================================================================================
Directory operations
--------------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``pwd`` print working (current) directory
------------------------- -------------------------------------------------------------------------------------------------------
``mkdir "dir"`` make new directory ``"dir"``
------------------------- -------------------------------------------------------------------------------------------------------
``cd "dir"`` go to directory ``"dir"``
------------------------- -------------------------------------------------------------------------------------------------------
``cd ..`` go up one directory
------------------------- -------------------------------------------------------------------------------------------------------
``ls`` list files
------------------------- -------------------------------------------------------------------------------------------------------
``ls -lh`` list files, with detailed file information
========================= =======================================================================================================
File-operations
---------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``cat "file"`` print file content to the screen
------------------------- -------------------------------------------------------------------------------------------------------
``head "file"`` show first 10 line of the file content
------------------------- -------------------------------------------------------------------------------------------------------
``tail "file"`` show last 10 line of the file content
------------------------- -------------------------------------------------------------------------------------------------------
``cp "file1" "file2"`` copy ``"file1"`` to ``"file2"``
------------------------- -------------------------------------------------------------------------------------------------------
``mv "file1" "file2"`` move (rename) ``"file1"`` to ``"file2"``
------------------------- -------------------------------------------------------------------------------------------------------
``rm "file"`` remove ``"file"``
------------------------- -------------------------------------------------------------------------------------------------------
``rm -r "dir"`` remove ``"dir"``
========================= =======================================================================================================
Bash commands
-------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``whoami`` show your username
------------------------- -------------------------------------------------------------------------------------------------------
``man command``           show manual of a ``command`` (sometimes: ``command -h`` or ``command --help``)
========================= =======================================================================================================
Search files
------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
``find`` find files
------------------------- -------------------------------------------------------------------------------------------------------
``grep`` show matched pattern in file content
========================= =======================================================================================================
Keyboard shortcuts
------------------
========================= =======================================================================================================
Command Description
========================= =======================================================================================================
:kbd:`Ctrl+c` abort command
------------------------- -------------------------------------------------------------------------------------------------------
:kbd:`Ctrl+r` search command history (use :kbd:`Ctrl+r` to proceed to next match, and arrows to modify the command)
------------------------- -------------------------------------------------------------------------------------------------------
:kbd:`Ctrl+d` exit terminal
========================= =======================================================================================================
Further reading
---------------
* `Linux cheat sheet <http://overapi.com/linux/>`_
/DI_engine-0.4.9-py3-none-any.whl/dizoo/gfootball/model/bots/rule_based_bot_model.py
from kaggle_environments.envs.football.helpers import *
from math import sqrt
from enum import Enum
import random
import torch
import torch.nn as nn
import numpy as np
from ding.torch_utils import tensor_to_list, one_hot, to_ndarray
from ding.utils import MODEL_REGISTRY
from ding.torch_utils import to_tensor, to_dtype
"""
Readable Reminder
*********************
class Action(Enum):
Idle = 0
Left = 1
TopLeft = 2
Top = 3
TopRight = 4
Right = 5
BottomRight = 6
Bottom = 7
BottomLeft = 8
LongPass= 9
HighPass = 10
ShortPass = 11
Shot = 12
Sprint = 13
ReleaseDirection = 14
ReleaseSprint = 15
Slide = 16
Dribble = 17
ReleaseDribble = 18
sticky_index_to_action = [
Action.Left,
Action.TopLeft,
Action.Top,
Action.TopRight,
Action.Right,
Action.BottomRight,
Action.Bottom,
Action.BottomLeft,
Action.Sprint,
Action.Dribble
]
class PlayerRole(Enum):
GoalKeeper = 0
CenterBack = 1
LeftBack = 2
RightBack = 3
DefenceMidfield = 4
CentralMidfield = 5
LeftMidfield = 6
RIghtMidfield = 7
AttackMidfield = 8
CentralFront = 9
class GameMode(Enum):
Normal = 0
KickOff = 1
GoalKick = 2
FreeKick = 3
Corner = 4
ThrowIn = 5
Penalty = 6
"""
class Situation(Enum):
    Delaying = 0
    Offencing = 1
    Defencing = 2
class Line(object):
    def __init__(self, pos1, pos2):
        # line through pos1 and pos2, stored as a * x + b * y + c = 0 with a = 1
        self.a = 1
        x1, y1 = pos1
        x2, y2 = pos2
        if (y2 - y1) != 0.0:
            self.b = -(x2 - x1) / (y2 - y1)
        else:
            # degenerate case y1 == y2: a very large b approximates the horizontal line y = y1
            self.b = 1e5
        self.c = -x1 - (self.b * y1)
        self.length = dist(pos1, pos2)
    def distToLine(self, pos):
        # perpendicular distance from pos to the line
        return abs(self.a * pos[0] + self.b * pos[1] + self.c) / sqrt(self.a ** 2 + self.b ** 2)
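# Worked example for the helper above (illustrative, not in the original source):
# Line([0, 0], [1, 1]) gives a = 1, b = -1, c = 0, so distToLine([1, 0]) returns
# |1 - 0 + 0| / sqrt(2) ~= 0.707, the perpendicular distance to that pass line.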
roles = [0, 7, 9, 2, 1, 1, 3, 5, 5, 5, 6]
passes = [Action.ShortPass, Action.LongPass, Action.HighPass]
offenseScore = {
0: [-8.0, 0.0],
1: [0.6, 0.8],
2: [0.6, 0.85],
3: [0.6, 0.85],
4: [0.7, 0.9],
5: [0.8, 0.9],
6: [1, 1],
7: [1, 1],
8: [1, 1.1],
9: [1.1, 1.2]
}
passBias = 2.0
defenceThreatDist = 0.3
threatAvg = 3.0
shotDistAbs = 0.03
shotDistFactor = 0.6
offenseGoalDistFactor = 3.0
offenseKeeperDistFactor = 0.5
offenseTirenessFactor = 0.3
sprintTirenessFactor = 0.5
passForShotFactor = 0.6
FREEKICK_SHOT_AREA = [[0.5, 1], [-0.2, 0.2]]
START_SHOT_AREA1 = [[0.6, 0.75], [-0.2, 0.2]]
START_SHOT_AREA2 = [[0.75, 0.95], [-0.13, 0.13]]
PASS_FOR_SHOT_AREA1 = [[0.75, 1], [-0.42, -0.18]]
PASS_FOR_SHOT_AREA2 = [[0.75, 1], [0.18, 0.42]]
KEEPER_ZONE_AREA = [[0.75, 1], [-0.2, 0.2]]
LONG_SHOT_RANGE_AREA = [[0.5, 1], [-0.25, 0.25]]
SPRINT_AREA = [[-0.1, 0.6], [-0.42, 0.42]]
DEFENCE_SPRING_AREA = [[-0.7, 0.4], [-0.4, 0.4]]
# DRIBBLE_AREA = [[-0.1, 0.2], [-0.3, 0.3]]
SLIDE_AREA = [[-0.65, 0], [-0.42, 0.42]]
takenSelfFactor = 0.5
passFactors = {Action.HighPass: [1.0, 1.2, 3.0], Action.ShortPass: [1.1, 1.5, 1.5], Action.LongPass: [1.0, 1.2, 2]}
# top right/ Bottom left corner are:
# [1, -0.42] and [-1, 0.42], respectively.
def dist(pos1, pos2):
return sqrt((pos1[1] - pos2[1]) ** 2 + (pos1[0] - pos2[0]) ** 2)
def dirSign(x):
if abs(x) < 0.01:
return 1
elif x < 0:
return 0
return 2
def plusPos(pos1, pos2):
return [pos1[0] + pos2[0], pos1[1] + pos2[1]]
def vec2dir(vec):
p = sqrt(vec[0] ** 2 + vec[1] ** 2)
coef = 1 / p
return [vec[0] * coef, vec[1] * coef]
TOTAL_STEP = 3000
# helper functions for movement
directions = [
[Action.TopLeft, Action.Top, Action.TopRight], [Action.Left, Action.Idle, Action.Right],
[Action.BottomLeft, Action.Bottom, Action.BottomRight]
]
def insideArea(pos, area):
return area[0][0] <= pos[0] <= area[0][1] and area[1][0] <= pos[1] <= area[1][1]
def gotoDir(x, y):
xdir = dirSign(x)
ydir = dirSign(y)
return directions[ydir][xdir]
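# Illustrative example (not in the original source): dirSign() buckets a signed
# offset into 0 (negative), 1 (roughly zero) or 2 (positive), and the pair indexes
# the `directions` table above, e.g. gotoDir(0.3, -0.2) -> directions[0][2], i.e.
# Action.TopRight (negative y is "up" on the pitch).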
class Processer(object):
def __init__(self):
self._obs = {}
self._curPos = None
self._keeperPos = None
self._goalPos = [1, 0]
self._shot_dir_ready = False
self._pass_dir_ready = False
self._ball_is_free = False
self._we_have_ball = False
self._enemy_have_ball = False
self._our_goalkeeper_have_ball = False
self._shot_buf_player = None
self._shot_buf_step = -1
self._pass_buf_player = None
self._pass_buf_step = -1
self._score_diff = 0
self._pass_type = Action.ShortPass
def preprocess(self):
self._game_mode = self._obs['game_mode']
self._cur_player = self._obs['active']
if self._obs['score'].shape[0] == 2:
self._score_diff = self._obs['score'][0] - self._obs['score'][1]
else:
self._score_diff = self._obs['score']
self._curPos = self._obs['left_team'][self._obs['active']]
self._curDir = self._obs['left_team_direction'][self._obs['active']]
self._keeperPos = self._obs['right_team'][0]
self._ballPos = self._obs['ball']
self._ourPos = self._obs['left_team']
self._enemyPos = self._obs['right_team']
self._ball_is_free = self._obs['ball_owned_team'] == -1
self._we_have_ball = self._obs['ball_owned_team'] == 0
self._enemy_have_ball = self._obs['ball_owned_team'] == 1
self._our_goalkeeper_have_ball = self._obs['ball_owned_player'] == 0 and self._we_have_ball
self._our_active_have_ball = self._we_have_ball and self._obs['ball_owned_player'] == self._obs['active']
self._controlled_role = self._obs['left_team_roles'][self._obs['active']]
self._most_foward_enemy_pos = self.getMostForwardEnemyPos()
self._closest_enemey_pos = self.getClosestEnemyPos()
self._closest_enemey_to_cur_vec = [
self._curPos[0] - self._closest_enemey_pos[0], self._curPos[1] - self._closest_enemey_pos[1]
]
self._closest_enemey_to_cur_dir = vec2dir(self._closest_enemey_to_cur_vec)
self._cloest_enemey_dist = dist(self._curPos, self._closest_enemey_pos)
self._remain_step = self._obs['steps_left']
self._cur_tireness = self._obs['left_team_tired_factor'][self._obs['active']]
self._our_tireness = self._obs['left_team_tired_factor']
self._dribbling = Action.Dribble in self._obs['sticky_actions']
self._sprinting = Action.Sprint in self._obs['sticky_actions']
self._our_goalkeeper_active = self._cur_player == 0
# TODO
self._ball_dir = self._obs['ball_direction']
self._ball_owner_dir = self.getBallOwnerDir()
self._ball_owner_pos = self.getBallOwnerPos()
if self._enemy_have_ball:
self._closest_to_enemy_pos, self._closest_to_enemy_player = self.getClosestToEnemy()
if not self._shot_dir_ready:
self._shot_buf_player = -1
# general helper
################################
def getRole(self, i):
return roles[i]
# general helper for init
#################################
def getBallOwnerPos(self):
if self._ball_is_free:
return None
elif self._we_have_ball:
return self._obs['left_team'][self._obs['ball_owned_player']]
else:
return self._obs['right_team'][self._obs['ball_owned_player']]
def getBallOwnerDir(self):
if self._ball_is_free:
return None
elif self._we_have_ball:
return self._obs['left_team_direction'][self._obs['ball_owned_player']]
else:
return self._obs['right_team_direction'][self._obs['ball_owned_player']]
# general movement
##################################
def gobetweenKeeperGate(self):
xdir = dirSign(self._keeperPos[0] / 2 + self._goalPos[0] / 2 - self._curPos[0] - 0.05)
ydir = dirSign(self._keeperPos[1] / 2 + self._goalPos[1] / 2 - self._curPos[1])
return directions[ydir][xdir]
def gotoDst(self, x, y):
xdir = dirSign(x - self._curPos[0])
ydir = dirSign(y - self._curPos[1])
return directions[ydir][xdir]
def getMostForwardEnemyPos(self):
ret = [0, 0]
i = 0
for pos in self._obs['right_team']:
if i == 0:
i += 1
continue
if pos[0] > ret[0]:
ret = pos
return ret
def getAvgDefenceDistToPlayer(self, *args):
if len(args) == 0:
i = self._cur_player
else:
i = args[0]
sumDist = 0
for pos in self._enemyPos:
if dist(pos, self._ourPos[i]) < defenceThreatDist:
sumDist += dist(pos, self._ourPos[i])
return sumDist / threatAvg
def getClosestEnemy(self, *args):
if len(args) == 0:
i = self._cur_player
else:
i = args[0]
closest_pos = self._keeperPos
closest_index = 0
index = 0
closest_dist = 2
for pos in self._obs['right_team']:
if dist(pos, self._ourPos[i]) < dist(self._ourPos[i], closest_pos):
closest_pos = pos
closest_index = index
closest_dist = dist(pos, self._ourPos[i])
index += 1
return [closest_pos, closest_index, closest_dist]
def getClosestEnemyPos(self, *args):
if len(args) == 0:
i = self._cur_player
else:
i = args[0]
return self.getClosestEnemy(i)[0]
def getClosestEnemyDist(self, *args):
if len(args) == 0:
i = self._cur_player
else:
i = args[0]
return self.getClosestEnemy(i)[2]
def should_sprint(self):
if self._cur_tireness * sprintTirenessFactor > ((TOTAL_STEP - self._remain_step) / TOTAL_STEP) + 0.2:
return False
if self._enemy_have_ball:
return insideArea(self._curPos, DEFENCE_SPRING_AREA)
if self._we_have_ball:
return insideArea(self._curPos, SPRINT_AREA)
    # helpers for judging when to shoot
def shotWill(self):
if insideArea(self._curPos, START_SHOT_AREA1) or insideArea(self._curPos, START_SHOT_AREA2):
return True
elif not insideArea(self._keeperPos, KEEPER_ZONE_AREA) and insideArea(self._curPos, LONG_SHOT_RANGE_AREA):
return True
if dist(self._curPos, self._keeperPos) < shotDistFactor * dist(self._keeperPos, self._goalPos) + shotDistAbs:
return True
return False
# short pass
# def shortPassForShot(self):
# if insideArea(self._curPos, PASS_FOR_SHOT_AREA1) or insideArea(self._curPos, PASS_FOR_SHOT_AREA2):
# if not self.judgeOffside():
# return True
# return False
# help defense
#########################
def getClosestToEnemy(self):
retpos = self._obs['left_team'][0]
index = 0
retindex = index
for pos in self._obs['left_team']:
if dist(pos, self._ball_owner_pos) < dist(retpos, self._ball_owner_pos):
retpos = pos
retindex = index
index += 1
return retpos, retindex
def getMinxLeftTeam(self):
i = 0
retpos = [1, 0]
for pos in self._ourPos:
if i == 0:
i += 1
continue
if pos[0] < retpos[0]:
retpos = pos
return retpos
# After testing we know that sliding is not good, so no slide
def should_slide(self):
if not self._enemy_have_ball:
return False
# TODO
# replace 'and True' -> 'has yellow card'
if self._curPos[0] < self._ball_owner_pos[0] - 0.01 and self._curPos[0] < self._ballPos[0] - 0.007 and dist(
self._curPos, self._ball_owner_pos) < 0.03 and self._curDir[0] < 0 and insideArea(self._curPos,
SLIDE_AREA) and True:
return True
return False
# TODO
# can this be smarter?
def should_chase(self):
if self._curPos[0] > self._ball_owner_pos[0] + 0.02 and self._curPos[0] != self._closest_to_enemy_pos[0]:
return False
minLeftTeamPos = self.getMinxLeftTeam()
if self._curPos[0] > self._ball_owner_pos[0] + 0.03 and self._ball_owner_pos[0] - minLeftTeamPos[0] > 1.5 * abs(
self._ball_owner_pos[1] - minLeftTeamPos[1]):
return False
return True
    # helpers for keeping the ball out of our own zone
def shotAway(self):
# disable or enable ?
return False
if self._curPos[0] < -0.7 and self._our_active_have_ball:
return True
return False
# def passAway(self):
# if self._curPos[0] < -0.4 and self._our_active_have_ball:
# return True
# return False
    # functions used to judge passing
def judgeOffside(self, *args):
if len(args) == 0:
LeftTeam = 0
for pos in self._obs['left_team']:
LeftTeam = max(LeftTeam, pos[0])
else:
LeftTeam = self._ourPos[args[0]][0]
maxRightTeam = self.getMostForwardEnemyPos()[0]
return LeftTeam > maxRightTeam
# TODO
def passWill(self):
curOffenceMark = self.offenseMark(self._cur_player)
bestPassMark, bestPassType, bestPassIndex = self.getBestPass()
if bestPassMark > curOffenceMark + passBias:
# print("cur pos=", self._curPos)
# print("cur off score = ", curOffenceMark)
# print("best pass mark = ", bestPassMark)
# print("remain step = ", self._remain_step)
# print("best pass type = ", bestPassType)
# print("want to pass to = ", bestPassIndex)
return True, bestPassType, bestPassIndex
else:
return False, Action.ShortPass, -1
# TODO
def getBestPass(self):
if not self._our_active_have_ball:
return -1, Action.ShortPass, -1
bestPassType = Action.ShortPass
bestPassIndex = -1
bestPassMark = -10
for index in range(11):
# can't pass to yourself
if index == self._cur_player:
continue
passMark, passType = self.passMarkTo(index)
if passMark > bestPassMark:
bestPassMark = passMark
bestPassType = passType
bestPassIndex = index
return bestPassMark, bestPassType, bestPassIndex
# TODO
def passMarkTo(self, i):
bestPassType = Action.ShortPass
bestPassMark = -10
for t in passes:
if self.getPassSuccessMark(i, t) + self.offenseMark(i) > bestPassMark:
bestPassType = t
bestPassMark = self.getPassSuccessMark(i, t) + self.offenseMark(i)
return bestPassMark, bestPassType
def getRoleOffenceScore(self, i):
r = roles[i]
adder, multier = offenseScore[r]
return adder, multier
# TODO
# around 1.0 to 10.0
def offenseMark(self, i):
mark = 0.0
mark += self.getClosestEnemyDist(i)
mark += self.getAvgDefenceDistToPlayer(i)
# the closer to enemy goal the better
mark += 3.0 / (dist(self._ourPos[i], self._goalPos) + 0.2)
# but should be further to goalie
mark -= 0.5 / (dist(self._ourPos[i], self._keeperPos) + 0.2)
# offense pluser for role
adder, multier = self.getRoleOffenceScore(i)
mark *= multier
mark += adder
# ADD tireness
mark += 1.0 - self._our_tireness[i] * offenseTirenessFactor
if insideArea(self._ourPos[i], PASS_FOR_SHOT_AREA1) or insideArea(self._ourPos[i], PASS_FOR_SHOT_AREA2):
mark = mark * passForShotFactor
return mark
# TODO
# range from
def getPassSuccessMark(self, i, passType):
# you can't pass to yourself right?
if i == self._cur_player:
return -10
# can't pass offside ball
if self.judgeOffside(i):
return -10
mark = 0.0
# calculate intercept
# if passType == Action.HighPass:
# interceptFactor = 1.0
# distFactor = 1.2
# takenFactor = 3.0
# elif passType == Action.ShortPass:
# interceptFactor = 1.0
# distFactor = 1.5
# takenFactor = 1.5
# else:
# interceptFactor = 1.2
# distFactor = 1.2
# takenFactor = 1.5
interceptFactor = passFactors[passType][0]
distFactor = passFactors[passType][1]
takenFactor = passFactors[passType][2]
l = Line(self._curPos, self._ourPos[i])
minDist = 2
for pos in self._enemyPos:
minDist = min(minDist, l.distToLine(pos))
mark += (minDist * interceptFactor)
# calculate taken
taken = self.getClosestEnemyDist(i) + takenSelfFactor * self.getClosestEnemyDist()
mark += (taken * takenFactor)
# calculate dist
mark += (l.length * distFactor)
return mark
# freeKick
def shotFreeKick(self):
if insideArea(self._curPos, FREEKICK_SHOT_AREA):
return True
return False
# TODO
def cutAngleWithClosest(self):
x = self._keeperPos[0] / 2 + self._goalPos[0] / 2 - self._curPos[0]
y = self._keeperPos[1] / 2 + self._goalPos[1] / 2 - self._curPos[1]
x += self._closest_enemey_to_cur_dir[0] * (0.05 / (self._cloest_enemey_dist + 0.03))
y += self._closest_enemey_to_cur_dir[1] * (0.05 / (self._cloest_enemey_dist + 0.03))
return gotoDir(x, y)
def process(self, obs):
self._obs = obs
self.preprocess()
# TODO
# of course you can only shot in penalty
if self._game_mode == GameMode.Penalty:
return Action.Shot
if self._game_mode == GameMode.Corner:
if self._pass_dir_ready:
return self._pass_type
bestPassMark, bestPassType, bestPassIndex = self.getBestPass()
self._pass_dir_ready = True
self._pass_type = bestPassType
return self.gotoDst(self._ourPos[bestPassIndex][0], self._ourPos[bestPassIndex][1])
if self._game_mode == GameMode.FreeKick:
if self.shotFreeKick():
return Action.Shot
else:
if self._pass_dir_ready:
return self._pass_type
bestPassMark, bestPassType, bestPassIndex = self.getBestPass()
self._pass_dir_ready = True
self._pass_type = bestPassType
return self.gotoDst(self._ourPos[bestPassIndex][0], self._ourPos[bestPassIndex][1])
if self._game_mode == GameMode.KickOff:
return Action.ShortPass
if self._game_mode == GameMode.ThrowIn:
if self._pass_dir_ready:
return self._pass_type
bestPassMark, bestPassType, bestPassIndex = self.getBestPass()
self._pass_dir_ready = True
self._pass_type = bestPassType
return self.gotoDst(self._ourPos[bestPassIndex][0], self._ourPos[bestPassIndex][1])
if self._our_active_have_ball and not self._our_goalkeeper_have_ball:
if self._shot_dir_ready and self._cur_player == self._shot_buf_player and self._remain_step == self._shot_buf_step - 1:
self._shot_dir_ready = False
self._shot_buf_player = -1
self._shot_buf_step = -1
return Action.Shot
if self.shotWill():
self._shot_buf_player = self._cur_player
self._shot_buf_step = self._remain_step
self._shot_dir_ready = True
# TODO
# improve shot direction
return self.gobetweenKeeperGate()
if self._pass_dir_ready and self._cur_player == self._pass_buf_player and self._remain_step == self._pass_buf_step - 1:
self._pass_dir_ready = False
self._pass_buf_player = -1
self._pass_buf_step = -1
return self._pass_type
# elif self.passAway() and self._curDir[0] > 0.0:
# return Action.HighPass
# elif self.shortPassForShot():
# return Action.ShortPass
else:
self._shot_dir_ready = False
self._pass_dir_ready = False
doPass, doPassType, doPassIndex = self.passWill()
if doPass:
self._pass_dir_ready = True
self._pass_type = doPassType
self._pass_buf_step = self._remain_step
self._pass_buf_player = self._cur_player
return self.gotoDst(self._ourPos[doPassIndex][0], self._ourPos[doPassIndex][1])
# ADD avoid opponent
if self._closest_enemey_to_cur_vec[0] > 0:
# closest enemy behind me and left
if not self._sprinting and self.should_sprint():
return Action.Sprint
if self._dribbling and dist(self._curPos, self._closest_enemey_pos) > 0.02:
return Action.ReleaseDribble
return self.gobetweenKeeperGate()
elif dist(self._curPos, self._closest_enemey_pos) < 0.02:
# enemy too close, start dribble
# if not self._dribbling:
# return Action.Dribble
# enemy infront of me, try to cut an angle
return self.cutAngleWithClosest()
else:
# no enemy near me
if self._dribbling:
return Action.ReleaseDribble
if not self._sprinting:
return Action.Sprint
# ADD release sprint
# if self._sprinting and not self.should_sprint():
# return Action.ReleaseSprintt
# elif not insideArea(curPos, SPRINT_AREA) and Action.Sprint in obs['sticky_actions']:
# return Action.ReleaseSprint
return self.gobetweenKeeperGate()
elif self._we_have_ball and not self._our_goalkeeper_have_ball and not self._our_active_have_ball:
self._shot_dir_ready = False
return self.gotoDst(self._goalPos[0], self._goalPos[1])
elif self._our_goalkeeper_have_ball:
self._shot_dir_ready = False
if self._our_goalkeeper_active:
return Action.HighPass
if self._sprinting:
return Action.ReleaseSprint
return self.gobetweenKeeperGate()
self._shot_dir_ready = False
# ball in enemy or ball free
if self._dribbling:
return Action.ReleaseDribble
if self._ball_is_free:
if not self._sprinting and self.should_sprint():
return Action.Sprint
return self.gotoDst(self._ballPos[0] + 2 * self._ball_dir[0], self._ballPos[1] + 2 * self._ball_dir[1])
if self._enemy_have_ball:
# TODO
# defense now!
            # if you can't catch him and you are not the closest one to the gate, just quit chasing.
"""
if not self.should_chase():
if self._sprinting:
return Action.ReleaseSprint
return Action.Idle
if self.should_slide():
return Action.Slide
"""
if not self._sprinting and self.should_sprint() and self.should_chase():
return Action.Sprint
# intersect the ball, see https://www.kaggle.com/c/google-football/discussion/191804
return self.gotoDst(
self._ballPos[0] + 1 * self._ball_dir[0] + 1 * self._ball_owner_dir[0],
self._ballPos[1] + 1 * self._ball_dir[1] + 1 * self._ball_owner_dir[1]
)
return self.gotoDst(self._goalPos[0], self._goalPos[1])
processer = Processer()
# @human_readable_agent
def agent(obs):
global processer
return processer.process(obs)
def raw_obs_to_readable(obs):
# print("obs = ", obs)
# print("obs sticky=", obs['active_player_sticky_actions'])
obs['sticky_actions'] = {sticky_index_to_action[nr] for nr, action in enumerate(obs['sticky_actions']) if action}
# Turn 'game_mode' into an enum.
obs['game_mode'] = GameMode(obs['game_mode'])
# In case of single agent mode, 'designated' is always equal to 'active'.
if 'designated' in obs:
del obs['designated']
    # Convert players' roles to enums.
obs['left_team_roles'] = [PlayerRole(role) for role in obs['left_team_roles']]
obs['right_team_roles'] = [PlayerRole(role) for role in obs['right_team_roles']]
return obs
def rule_agent(obs):
# obs = obs[0]
obs = raw_obs_to_readable(obs)
return agent(obs).value
def idel_agent(obs):
return 0
def random_agent(obs):
return random.randint(0, 18)
agents_map = {"random": random_agent, "rule": rule_agent, "idel": idel_agent}
@MODEL_REGISTRY.register('football_rule')
class FootballRuleBaseModel(torch.nn.Module):
def __init__(self, cfg={}):
super(FootballRuleBaseModel, self).__init__()
self.agent_type = cfg.get('agent_type', 'rule')
self._agent = agents_map[self.agent_type]
        # be compatible with bc policy
# to avoid: ValueError: optimizer got an empty parameter list
self._dummy_param = nn.Parameter(torch.zeros(1, 1))
def forward(self, data):
actions = []
data = data['raw_obs']
if isinstance(data['score'], list):
            # to be compatible with collect phase in subprocess mode
data['score'] = torch.stack(data['score'], dim=-1)
# dict of raw observations -> list of dict, each element in the list is the raw obs in one timestep
data = [{k: v[i] for k, v in data.items()} for i in range(data['left_team'].shape[0])]
for d in data:
            # the raw obs in one timestep
if isinstance(d['steps_left'], torch.Tensor):
d = {k: v.cpu() for k, v in d.items()}
d = to_ndarray(d)
for k in ['active', 'designated', 'ball_owned_player', 'ball_owned_team']:
d[k] = int(d[k])
actions.append(self._agent(d))
return {'action': torch.LongTensor(actions), 'logit': one_hot(torch.LongTensor(actions), 19)} | PypiClean |
/ConSSL-0.0.1-py3-none-any.whl/CSSL/models/self_supervised/swav/transforms.py | from typing import List
import numpy as np
from CSSL.utils import _OPENCV_AVAILABLE, _TORCHVISION_AVAILABLE
from CSSL.utils.warnings import warn_missing_pkg
if _TORCHVISION_AVAILABLE:
from torchvision import transforms as transforms
else: # pragma: no cover
warn_missing_pkg('torchvision')
if _OPENCV_AVAILABLE:
import cv2
else: # pragma: no cover
warn_missing_pkg('cv2', pypi_name='opencv-python')
class SwAVTrainDataTransform(object):
def __init__(
self,
normalize=None,
size_crops: List[int] = [96, 36],
nmb_crops: List[int] = [2, 4],
min_scale_crops: List[float] = [0.33, 0.10],
max_scale_crops: List[float] = [1, 0.33],
gaussian_blur: bool = True,
jitter_strength: float = 1.
):
self.jitter_strength = jitter_strength
self.gaussian_blur = gaussian_blur
assert len(size_crops) == len(nmb_crops)
assert len(min_scale_crops) == len(nmb_crops)
assert len(max_scale_crops) == len(nmb_crops)
self.size_crops = size_crops
self.nmb_crops = nmb_crops
self.min_scale_crops = min_scale_crops
self.max_scale_crops = max_scale_crops
self.color_jitter = transforms.ColorJitter(
0.8 * self.jitter_strength, 0.8 * self.jitter_strength, 0.8 * self.jitter_strength,
0.2 * self.jitter_strength
)
transform = []
color_transform = [transforms.RandomApply([self.color_jitter], p=0.8), transforms.RandomGrayscale(p=0.2)]
if self.gaussian_blur:
kernel_size = int(0.1 * self.size_crops[0])
if kernel_size % 2 == 0:
kernel_size += 1
color_transform.append(GaussianBlur(kernel_size=kernel_size, p=0.5))
self.color_transform = transforms.Compose(color_transform)
if normalize is None:
self.final_transform = transforms.ToTensor()
else:
self.final_transform = transforms.Compose([transforms.ToTensor(), normalize])
for i in range(len(self.size_crops)):
random_resized_crop = transforms.RandomResizedCrop(
self.size_crops[i],
scale=(self.min_scale_crops[i], self.max_scale_crops[i]),
)
transform.extend([
transforms.Compose([
random_resized_crop,
transforms.RandomHorizontalFlip(p=0.5), self.color_transform, self.final_transform
])
] * self.nmb_crops[i])
self.transform = transform
# add online train transform of the size of global view
online_train_transform = transforms.Compose([
transforms.RandomResizedCrop(self.size_crops[0]),
transforms.RandomHorizontalFlip(), self.final_transform
])
self.transform.append(online_train_transform)
def __call__(self, sample):
multi_crops = list(map(lambda transform: transform(sample), self.transform))
return multi_crops
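# Hedged usage sketch (not part of the original module): `pil_image` is assumed to
# be a PIL.Image; with the default nmb_crops=[2, 4] the transform returns
# 2 + 4 + 1 = 7 tensors (the last one is the online-train view appended above).
def _swav_train_transform_usage_sketch(pil_image):
    transform = SwAVTrainDataTransform(size_crops=[96, 36], nmb_crops=[2, 4])
    crops = transform(pil_image)  # list of 7 tensors
    return crops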
class SwAVEvalDataTransform(SwAVTrainDataTransform):
def __init__(
self,
normalize=None,
size_crops: List[int] = [96, 36],
nmb_crops: List[int] = [2, 4],
min_scale_crops: List[float] = [0.33, 0.10],
max_scale_crops: List[float] = [1, 0.33],
gaussian_blur: bool = True,
jitter_strength: float = 1.
):
super().__init__(
normalize=normalize,
size_crops=size_crops,
nmb_crops=nmb_crops,
min_scale_crops=min_scale_crops,
max_scale_crops=max_scale_crops,
gaussian_blur=gaussian_blur,
jitter_strength=jitter_strength
)
input_height = self.size_crops[0] # get global view crop
test_transform = transforms.Compose([
transforms.Resize(int(input_height + 0.1 * input_height)),
transforms.CenterCrop(input_height),
self.final_transform,
])
# replace last transform to eval transform in self.transform list
self.transform[-1] = test_transform
class SwAVFinetuneTransform(object):
def __init__(
self,
input_height: int = 224,
jitter_strength: float = 1.,
normalize=None,
eval_transform: bool = False
) -> None:
self.jitter_strength = jitter_strength
self.input_height = input_height
self.normalize = normalize
self.color_jitter = transforms.ColorJitter(
0.8 * self.jitter_strength,
0.8 * self.jitter_strength,
0.8 * self.jitter_strength,
0.2 * self.jitter_strength,
)
if not eval_transform:
data_transforms = [
transforms.RandomResizedCrop(size=self.input_height),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([self.color_jitter], p=0.8),
transforms.RandomGrayscale(p=0.2)
]
else:
data_transforms = [
transforms.Resize(int(self.input_height + 0.1 * self.input_height)),
transforms.CenterCrop(self.input_height)
]
if normalize is None:
final_transform = transforms.ToTensor()
else:
final_transform = transforms.Compose([transforms.ToTensor(), normalize])
data_transforms.append(final_transform)
self.transform = transforms.Compose(data_transforms)
def __call__(self, sample):
return self.transform(sample)
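# Hedged usage sketch (not part of the original module): the finetune transform is a
# single-view pipeline; eval_transform=True switches to the deterministic
# resize + center-crop variant.
def _swav_finetune_transform_usage_sketch(pil_image):
    train_tf = SwAVFinetuneTransform(input_height=224)
    eval_tf = SwAVFinetuneTransform(input_height=224, eval_transform=True)
    return train_tf(pil_image), eval_tf(pil_image)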
class GaussianBlur(object):
# Implements Gaussian blur as described in the SimCLR paper
def __init__(self, kernel_size, p=0.5, min=0.1, max=2.0):
self.min = min
self.max = max
# kernel size is set to be 10% of the image height/width
self.kernel_size = kernel_size
self.p = p
def __call__(self, sample):
sample = np.array(sample)
# blur the image with a 50% chance
prob = np.random.random_sample()
if prob < self.p:
sigma = (self.max - self.min) * np.random.random_sample() + self.min
sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
return sample | PypiClean |
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/engel26.py | from genice2.cell import cellvectors
import genice2.lattices
import numpy as np
desc = {
"ref": {
"PCOD8321499": "Engel 2018",
"engel26": "Engel 2018"
},
"usage": "No options available.",
"brief": "Hypothetical zeolitic ice"
}
class Lattice(genice2.lattices.Lattice):
def __init__(self):
self.cell = cellvectors(
a=20.09717,
b=20.04341,
c=20.95821,
A=89.5522,
B=89.6358,
C=60.9317
)
self.waters = np.array([
[0.488392, -0.145329, 0.404387],
[0.218323, -0.218308, 0.148320],
[0.496688, -0.356575, 0.055932],
[-0.171991, -0.015769, 0.325318],
[0.209368, -0.011642, 0.308165],
[-0.117154, 0.453008, 0.392823],
[0.030348, 0.172379, 0.488752],
[0.073805, 0.456589, 0.233702],
[-0.124930, -0.358860, 0.074103],
[0.492306, 0.453049, 0.392440],
[0.216450, -0.409123, 0.314221],
[0.679844, -0.148704, 0.230050],
[0.487796, 0.047896, 0.069571],
[0.632770, 0.179649, 0.481680],
[0.214257, 0.183722, 0.151263],
[0.687112, 0.453543, 0.229988],
[-0.171827, 0.171851, 0.148306],
[0.027318, -0.222809, 0.478221],
[0.488737, -0.144275, -0.092670],
[0.220967, -0.214751, 0.648680],
[0.499913, -0.359152, 0.555487],
[-0.172026, -0.015895, -0.178239],
[0.211017, -0.008350, -0.191810],
[-0.116996, 0.452866, -0.104109],
[0.027599, 0.173436, -0.011312],
[0.070334, 0.460757, 0.733576],
[-0.127029, -0.354545, 0.573695],
[0.492425, 0.452916, -0.104289],
[0.218653, -0.409213, -0.189391],
[0.677734, -0.149389, 0.730651],
[0.484568, 0.046995, 0.569816],
[0.636751, 0.177648, -0.018353],
[0.211143, 0.187636, 0.651696],
[0.686981, 0.453682, 0.733547],
[-0.174047, 0.174696, 0.648026],
[0.026550, -0.224306, -0.025459],
])
self.coord = 'relative' | PypiClean |
/AnyBlok-2.1.0.tar.gz/AnyBlok-2.1.0/anyblok/field.py | from sqlalchemy.ext.hybrid import hybrid_property
from anyblok.common import anyblok_column_prefix
from anyblok.mapper import ModelRepr
class FieldException(Exception):
"""Simple Exception for Field"""
class Field:
"""Field class
    This class must not be instantiated
"""
use_hybrid_property = False
def __init__(self, *args, **kwargs):
"""Initialize the field
:param label: label of this field
:type label: str
"""
self.forbid_instance(Field)
self.label = None
self.ignore_migration = kwargs.pop("ignore_migration", False)
if "label" in kwargs:
self.label = kwargs.pop("label")
self.context = kwargs.pop("context", {})
self.args = args
self.kwargs = kwargs
def forbid_instance(self, cls):
"""Raise an exception if the cls is an instance of this __class__
:param cls: instance of the class
:exception: FieldException
"""
if self.__class__ is cls:
raise FieldException(
"%r class must not be instanciated use a sub class" % cls
)
def update_description(self, registry, model, res):
res.update(self.context)
def update_properties(self, registry, namespace, fieldname, properties):
"""Update the propertie use to add new column
:param registry: current registry
:param namespace: name of the model
:param fieldname: name of the field
:param properties: properties known to the model
"""
if self.ignore_migration:
table = registry.loaded_namespaces_first_step[namespace][
"__tablename__"
]
ignore_migration_for = registry.ignore_migration_for.get(table)
if ignore_migration_for is True:
return # pragma: no cover
elif not ignore_migration_for:
registry.ignore_migration_for[table] = [fieldname]
else:
ignore_migration_for.append(fieldname) # pragma: no cover
def get_property(self, registry, namespace, fieldname, properties):
"""Return the property of the field
.. warning::
            When the getter is called on the class attribute, SQLAlchemy
            wraps the column on each call, so the id of the wrapper is not
            the same between calls
:param registry: current registry
:param namespace: name of the model
:param fieldname: name of the field
:param properties: properties known to the model
"""
fget = self.wrap_getter_column(fieldname)
fset = self.wrap_setter_column(fieldname)
fexp = self.wrap_expr_column(fieldname)
for func in (fget, fset, fexp):
func.__name__ = fieldname
hybrid = hybrid_property(fget)
hybrid = hybrid.setter(fset)
hybrid = hybrid.expression(fexp)
return hybrid
def getter_format_value(self, value):
return value
def wrap_getter_column(self, fieldname):
"""Return a default getter for the field
:param fieldname: name of the field
"""
attr_name = anyblok_column_prefix + fieldname
def getter_column(model_self):
return self.getter_format_value(getattr(model_self, attr_name))
return getter_column
def wrap_expr_column(self, fieldname):
"""Return a default expr for the field
:param fieldname: name of the field
"""
attr_name = anyblok_column_prefix + fieldname
def expr_column(model_cls):
return getattr(model_cls, attr_name)
return expr_column
def expire_related_attribute(self, model_self, action_todos):
for action_todo in action_todos:
if len(action_todo) == 1:
obj = model_self
attrs = [action_todo[0]]
else:
obj = getattr(model_self, action_todo[0])
attrs = [action_todo[1]]
if obj is None:
continue
if obj in model_self.anyblok.session:
if obj._sa_instance_state.persistent:
model_self.anyblok.expire(obj, attrs)
def setter_format_value(self, value):
return value
def wrap_setter_column(self, fieldname):
attr_name = anyblok_column_prefix + fieldname
def setter_column(model_self, value):
action_todos = set()
if fieldname in model_self.loaded_columns:
action_todos = model_self.anyblok.expire_attributes.get(
model_self.__registry_name__, {}
).get(fieldname, set())
self.expire_related_attribute(model_self, action_todos)
value = self.setter_format_value(value)
res = setattr(model_self, attr_name, value)
self.expire_related_attribute(model_self, action_todos)
return res
return setter_column
def get_sqlalchemy_mapping(
self, registry, namespace, fieldname, properties
):
"""Return the instance of the real field
:param registry: current registry
:param namespace: name of the model
:param fieldname: name of the field
        :param properties: properties known to the model
:rtype: instance of Field
"""
self.format_label(fieldname)
return self
def format_label(self, fieldname):
"""Return the label for this field
        :param fieldname: if no label is filled, the fieldname will be capitalized
and returned
:rtype: the label for this field
"""
if not self.label:
label = fieldname.replace("_", " ")
self.label = label.capitalize()
def native_type(self, registry):
"""Return the native SqlAlchemy type
:exception: FieldException
"""
raise FieldException(
"No native type for this field"
) # pragma: no cover
def must_be_declared_as_attr(self):
"""Return False, it is the default value"""
return False
def must_be_copied_before_declaration(self):
"""Return False, it is the default value"""
return False
def autodoc_get_properties(self):
res = {"Type": self.__class__}
res["Context"] = self.context
res["Label"] = self.label
res.update(self.kwargs)
return res
autodoc_omit_property_values = set(
(
("Label", None),
("Context", None),
)
)
def autodoc_format_dict(self, key, value, level=0):
bullets = ["*", "+", "•", "‣"]
bullet = bullets[level]
padding = " " * level
key = key.strip()
if isinstance(value, dict): # pragma: no cover
res = padding + "%c ``%s``:\n\n" % (bullet, key)
res += "\n".join(
[
self.autodoc_format_dict(x, y, level=level + 1)
for x, y in value.items()
]
)
res += "\n"
return res
elif isinstance(value, (list, tuple)): # pragma: no cover
res = padding + "%c ``%s``:\n\n" % (bullet, key)
next_bullet = bullets[level + 1]
res += "\n".join(
padding + " %c ``%r``" % (next_bullet, x) for x in value
)
res += "\n"
return res
else:
if isinstance(value, type):
rst_val = ":class:`%s.%s`" % (value.__module__, value.__name__)
elif isinstance(value, ModelRepr): # pragma: no cover
rst_val = value.model_name
else:
rst_val = "``%r``" % value
return padding + "%c ``%s`` - %s" % (bullet, key, rst_val)
def autodoc_do_omit(self, k, v):
"""Maybe convert, then check in :attr:`autodoc_omit_property_values`
Mutable types aren't hashable, and usually, if not empty, it makes
sense to display them. Therefore, we replace them by None if
empty to decide and let through otherwise.
Hence, to exclude empty Context from autodoc, is done by putting
``('Context', None)`` in :attr:`autodoc_omit_property_values`
"""
if isinstance(v, list) or isinstance(v, dict) or isinstance(v, set):
if v:
return True
v = None
return (k, v) in self.autodoc_omit_property_values
def autodoc(self):
return "\n".join(
self.autodoc_format_dict(x, y)
for x, y in self.autodoc_get_properties().items()
if not self.autodoc_do_omit(x, y)
)
class Function(Field):
"""Function Field
::
from anyblok.declarations import Declarations
from anyblok.field import Function
@Declarations.register(Declarations.Model)
class Test:
        x = Function(fget='fget', fset='fset', fdel='fdel', fexpr='fexpr',
                     fuexpr='fuexpr')
    .. warning::
fexpr must be a classmethod
fuexpr must be a classmethod
"""
def get_sqlalchemy_mapping(
self, registry, namespace, fieldname, properties
):
"""Return the property of the field
:param registry: current registry
:param namespace: name of the model
:param fieldname: name of the field
:param properties: properties known to the model
"""
def wrap(method):
m = self.kwargs.get(method)
if m is None:
return None
def function_method(model_self, *args, **kwargs):
if method == "fget":
cls = registry.get(model_self.__registry_name__)
if model_self is cls:
return hasattr(model_self, m)
return getattr(model_self, m)(*args, **kwargs)
function_method.__name__ = fieldname
return function_method
fget = wrap("fget")
fset = wrap("fset")
fdel = wrap("fdel")
fexpr = wrap("fexpr")
fuexpr = wrap("fuexpr")
hybrid = hybrid_property(fget)
hybrid = hybrid.setter(fset)
hybrid = hybrid.deleter(fdel)
hybrid = hybrid.expression(fexpr)
hybrid = hybrid.update_expression(fuexpr)
self.format_label(fieldname)
properties["loaded_fields"][fieldname] = self.label
return hybrid
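# Hedged usage sketch (not part of the original module): the strings given to
# Function() name methods that must exist on the declared model, e.g.:
#
#     @Declarations.register(Declarations.Model)
#     class Test:
#         x = Function(fget='fget', fset='fset')
#
#         def fget(self):
#             return 'computed'
#
#         def fset(self, value):
#             self.some_column = value   # `some_column` is only illustrative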
def format_struc(entry, keys):
key = keys[0]
if len(keys) == 1:
if key not in entry:
entry[key] = None
else:
if key not in entry:
entry[key] = {}
format_struc(entry[key], keys[1:])
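# Worked example (not part of the original module): format_struc builds the nested
# dict skeleton for a key path, leaving the leaf value at None.
#
#     d = {}
#     format_struc(d, ['a', 'b'])
#     # d == {'a': {'b': None}}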
class JsonRelated(Field):
"""Json Related Field
::
from anyblok.declarations import Declarations
from anyblok.field import JsonRelated
from anyblok.column import Json
@Declarations.register(Declarations.Model)
class Test:
properties = Json()
x = JsonRelated(json_column='properties', keys=['x'])
"""
def __init__(self, *args, **kwargs):
self.json_column = kwargs.pop("json_column", None)
if self.json_column is None:
raise FieldException(
"json_column is a required attribute for " "JsonRelated"
)
self.keys = kwargs.pop("keys", None)
if self.keys is None:
raise FieldException("keys is a required attribute for JsonRelated")
self.get_adapter = kwargs.pop("get_adapter", None)
self.set_adapter = kwargs.pop("set_adapter", None)
super(JsonRelated, self).__init__(*args, **kwargs)
def get_fget(self):
def fget(model_self):
json_column = getattr(model_self, self.json_column)
if json_column is None:
json_column = {}
format_struc(json_column, self.keys)
entry = json_column
for key in self.keys:
entry = entry[key]
if entry and self.get_adapter:
get_adapter = self.get_adapter
if isinstance(get_adapter, str):
get_adapter = getattr(model_self, get_adapter)
entry = get_adapter(entry)
return entry
return fget
def get_fset(self):
def fset(model_self, value):
json_column = getattr(model_self, self.json_column)
if json_column is None:
json_column = {}
format_struc(json_column, self.keys)
entry = json_column
for key in self.keys[:-1]:
entry = entry[key]
if value and self.set_adapter:
set_adapter = self.set_adapter
if isinstance(set_adapter, str):
set_adapter = getattr(model_self, set_adapter)
value = set_adapter(value)
entry[self.keys[-1]] = value
setattr(model_self, self.json_column, json_column)
return fset
def get_fdel(self):
def fdel(model_self):
json_column = getattr(model_self, self.json_column)
if json_column is None:
json_column = {}
format_struc(json_column, self.keys)
entry = json_column
for key in self.keys[:-1]:
entry = entry[key]
entry[self.keys[-1]] = None
setattr(model_self, self.json_column, json_column)
return fdel
def get_fexpr(self):
def fexpr(model_cls):
entry = getattr(model_cls, self.json_column)
for key in self.keys:
entry = entry[key]
return entry
return fexpr
def get_sqlalchemy_mapping(
self, registry, namespace, fieldname, properties
):
"""Return the property of the field
:param registry: current registry
:param namespace: name of the model
:param fieldname: name of the field
:param properties: properties known to the model
"""
self.format_label(fieldname)
properties["loaded_fields"][fieldname] = self.label
fget = self.get_fget()
fset = self.get_fset()
fdel = self.get_fdel()
fexpr = self.get_fexpr()
for func in (fget, fset, fdel, fexpr):
func.__name__ = fieldname
hybrid = hybrid_property(fget)
hybrid = hybrid.setter(fset)
hybrid = hybrid.deleter(fdel)
hybrid = hybrid.expression(fexpr)
return hybrid
def autodoc_get_properties(self):
res = super(JsonRelated, self).autodoc_get_properties()
res.update(
{
"json_column": self.json_column,
"keys": " -> ".join(self.keys),
}
)
return res | PypiClean |
/FMOARPG-0.0.7-py3-none-any.whl/BIM360/__init__.py | import requests
import BIM360
import CLASSES
import DATAMANAGEMENT
import Utils
from datetime import date
def getToken(client_id, client_secret, credentials):
"""Obtain Forge token given a client id & secret"""
req = { 'client_id' : client_id, 'client_secret': client_secret, 'grant_type' : 'client_credentials','scope': credentials}
resp = requests.post('https://developer.api.autodesk.com/authentication/v1/authenticate', req).json()
#return resp['token_type'] + " " + resp['access_token']
#return resp['access_token']
token = CLASSES.Token(resp['access_token'], resp['token_type'], resp['expires_in'], date.today())
return token
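# Hedged usage sketch (not part of the original module): the scope string below is
# only an example, and the attribute names on CLASSES.Token are not shown in this
# file (it is built from access_token, token_type, expires_in and today's date).
#
#     token_obj = getToken(client_id, client_secret, "data:read account:read")
#     # the helpers below (getAccountUsers, getBIM360Projects, ...) expect the raw
#     # access-token string, i.e. the first value passed to CLASSES.Token above.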
def getAccountUsers(token, hubId):
url = "https://developer.api.autodesk.com/hq/v1/accounts/{0}/users".format(hubId)
payload = ""
headers = {
'authorization': "Bearer {0}".format(token)
}
response = requests.request("GET", url, data=payload, headers=headers)
usuarios = []
for user in response.json():
usuario = CLASSES.User(user['id'], user['email'], user['name'], "NA", "NA", "NA")
usuarios.append(usuario)
return usuarios
def getProjectUsers(token, pjtId, offset):
url = "https://developer.api.autodesk.com/bim360/admin/v1/projects/{0}/users".format(pjtId)
querystring = {"limit":"200", "offset":str(offset)}
headers = {
'authorization': "Bearer "+token
}
response = requests.request("GET", url, headers=headers, params=querystring)
usuarios = []
for user in response.json()['results']:
usuario = CLASSES.User(user['id'], user['email'],user['name'], user['jobTitle'], user['roleIds'], user['companyId'])
usuario.autodeskId = user['autodeskId']
usuarios.append(usuario)
return [[response.json()['pagination']['limit'], response.json()['pagination']['offset'], response.json()['pagination']['totalResults']], usuarios]
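# Hedged pagination sketch (not part of the original module): the first element of
# the returned list is [limit, offset, totalResults], so callers can page through a
# project's users until every result has been fetched.
def _get_all_project_users_sketch(token, pjtId):
    offset = 0
    all_users = []
    while True:
        (limit, _, total), users = getProjectUsers(token, pjtId, offset)
        all_users.extend(users)
        offset += limit
        if offset >= total:
            break
    return all_users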
def getBIM360Projects(token, hubId, offset):
url = "https://developer.api.autodesk.com/hq/v1/accounts/{0}/projects".format(hubId)
querystring = {"limit":"100","sort":"name", "offset":str(offset)}
headers = {
'authorization': "Bearer "+token
}
response = requests.request("GET", url, headers=headers, params=querystring)
projetos = []
for project in response.json():
projeto = CLASSES.Projeto(project['name'], project['id'], hubId, "")
projeto.city = project['city']
projeto.construction_type = project['construction_type']
projeto.country = project['country']
projeto.end_date = project['end_date']
projeto.start_date = project['start_date']
projeto.state_or_province = project['state_or_province']
projeto.project_type = project['project_type']
projeto.status = project['status']
projetos.append(projeto)
return projetos
def getCustomAttribute(token, urnas, pjtId):
url = "https://developer.api.autodesk.com/bim360/docs/v1/projects/{0}/versions:batch-get".format(pjtId)
#payload = "{\"urns\": [\"urn:adsk.wipprod:dm.lineage:XLhFbCdgQvKG6x9W6oA5fg\",\"urn:adsk.wipprod:dm.lineage:OhEGeSv8SFOkvK-bFQZEQQ\"]}"
payload = "{\"urns\": "+Utils.ListToJsonString(urnas)+"}"
headers = {
'content-type': "application/json",
'authorization': "Bearer "+token
}
response = requests.request("POST", url, data=payload, headers=headers)
atributos = []
for attributes in response.json()['results']:
for attribute in attributes['customAttributes']:
atributo = CLASSES.CustomAttribute(attribute['name'], attribute['value'])
atributos.append(atributo)
return atributos
def getReviewActivity(token, pjtId):
url = "https://developer.api.autodesk.com/bim360/admin/v1/projects/{0}/activities".format(pjtId)
querystring = {"limit":"100"}
headers = {
'accept-language': "pt-BR",
'authorization': "Bearer "+token
}
response = requests.request("GET", url, headers=headers, params=querystring)
atividades = []
for atividade in response.json()["streamItems"]:
if atividade['activity']['verb']=="initiate-review-process" or atividade['activity']['verb']=="claim-review-task" or atividade['activity']['verb']=="submit-review":
novaAtividade = CLASSES.Atividade(atividade['activity']['published'], atividade['activity']['generator'], atividade['activity']['actor']['displayName'], atividade['activity']['verb'], atividade['activity']['object']['displayName'], atividade['activity']['object']['id'], atividade['activity']['object']['project']['id'], atividade['activity']['object']['project']['displayName'])
atividades.append(novaAtividade)
print("nova atividade "+novaAtividade.objeto_displayName)
return [response.json()['nextToken'], atividades]
def getReviewActivityToken(token, pjtId, pageToken):
url = "https://developer.api.autodesk.com/bim360/admin/v1/projects/{0}/activities".format(pjtId)
querystring = {"limit":"100","token":pageToken}
headers = {
'accept-language': "pt-BR",
'authorization': "Bearer "+token
}
response = requests.request("GET", url, headers=headers, params=querystring)
atividades = []
for atividade in response.json()["streamItems"]:
if atividade['activity']['verb']=="initiate-review-process" or atividade['activity']['verb']=="claim-review-task" or atividade['activity']['verb']=="submit-review":
novaAtividade = CLASSES.Atividade(atividade['activity']['published'], atividade['activity']['generator'], atividade['activity']['actor']['displayName'], atividade['activity']['verb'], atividade['activity']['object']['displayName'], atividade['activity']['object']['id'], atividade['activity']['object']['project']['id'], atividade['activity']['object']['project']['displayName'])
atividades.append(novaAtividade)
nextToken = ""
try:
nextToken = response.json()['nextToken']
except:
pass
return [nextToken, atividades]
def getProjectIssues(token, container):
url = "https://developer.api.autodesk.com/issues/v1/containers/{0}/quality-issues".format(container)
querystring = {"page[limit]":"100"}
headers = {
'authorization': "Bearer "+token
}
issues = []
response = requests.request("GET", url, headers=headers, params=querystring)
for issue in response.json()['data']:
issue_obj = CLASSES.Issue()
issue_obj.created_at = issue['attributes']['created_at']
issue_obj.closed_at = issue['attributes']['closed_at']
issue_obj.closed_by = issue['attributes']['closed_by']
issue_obj.created_by = issue['attributes']['created_by']
issue_obj.opened_at = issue['attributes']['opened_at']
issue_obj.opened_by = issue['attributes']['opened_by']
issue_obj.updated_by = issue['attributes']['updated_by']
issue_obj.title = issue['attributes']['title']
issue_obj.description = issue['attributes']['description']
issue_obj.due_date = issue['attributes']['due_date']
issue_obj.status = issue['attributes']['status']
issue_obj.assigned_to = issue['attributes']['assigned_to']
issue_obj.assigned_to_type = issue['attributes']['assigned_to_type']
issue_obj.updated_at = issue['attributes']['updated_at']
issues.append(issue_obj)
return issues
def GetCompanies(token, hubId):
url = "https://developer.api.autodesk.com/hq/v1/accounts/{0}/companies".format(hubId)
querystring = {"limit":"100"}
headers = {
'authorization': "Bearer "+token
}
response = requests.request("GET", url, headers=headers, params=querystring)
empresas = []
for company in response.json():
empresa = CLASSES.Company(company['id'], company['name'], company['city'], company['state_or_province'], company['country'])
empresas.append(empresa)
return empresas | PypiClean |
/BenchExec-3.17.tar.gz/BenchExec-3.17/contrib/vcloud/vcloudutil.py |
import collections
import os
import sys
import benchexec.util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def parse_vcloud_run_result(values):
result_values = collections.OrderedDict()
def parse_time_value(s):
if s[-1] != "s":
raise ValueError(f'Cannot parse "{s}" as a time value.')
return float(s[:-1])
def set_exitcode(new):
if "exitcode" in result_values:
old = result_values["exitcode"]
assert (
old == new
), f"Inconsistent exit codes {old} and {new} from VerifierCloud"
else:
result_values["exitcode"] = new
for key, value in values:
value = value.strip()
if key in ["cputime", "walltime"]:
result_values[key] = parse_time_value(value)
elif key == "memory":
result_values["memory"] = int(value.strip("B"))
elif key == "exitcode":
set_exitcode(benchexec.util.ProcessExitCode.from_raw(int(value)))
elif key == "returnvalue":
set_exitcode(benchexec.util.ProcessExitCode.create(value=int(value)))
elif key == "exitsignal":
set_exitcode(benchexec.util.ProcessExitCode.create(signal=int(value)))
elif (
key in ["host", "terminationreason", "cpuCores", "memoryNodes", "starttime"]
or key.startswith("blkio-")
or key.startswith("cpuenergy")
or key.startswith("energy-")
or key.startswith("cputime-cpu")
):
result_values[key] = value
elif key not in ["command", "timeLimit", "coreLimit", "memoryLimit"]:
result_values["vcloud-" + key] = value
return result_values
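# Hedged worked example (not part of the original module): unknown keys are kept
# with a "vcloud-" prefix, time values lose their trailing "s", memory its "B".
#
#     parse_vcloud_run_result([("walltime", "1.5s"), ("memory", "2048B"), ("vm", "worker1")])
#     # -> OrderedDict([('walltime', 1.5), ('memory', 2048), ('vcloud-vm', 'worker1')])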
def parse_frequency_value(s):
# Contrary to benchexec.util.parse_frequency_value, this handles float values.
if not s:
return s
s = s.strip()
pos = len(s)
while pos and not s[pos - 1].isdigit():
pos -= 1
number = float(s[:pos])
unit = s[pos:].strip()
if not unit or unit == "Hz":
return int(number)
elif unit == "kHz":
return int(number * 1000)
elif unit == "MHz":
return int(number * 1000 * 1000)
elif unit == "GHz":
return int(number * 1000 * 1000 * 1000)
else:
raise ValueError(f"unknown unit: {unit} (allowed are Hz, kHz, MHz, and GHz)")
def is_windows():
return os.name == "nt"
def force_linux_path(path):
if is_windows():
return path.replace("\\", "/")
return path | PypiClean |
/Kahi_scienti_sources-0.0.2a0.tar.gz/Kahi_scienti_sources-0.0.2a0/kahi_scienti_sources/Kahi_scienti_sources.py | from kahi.KahiBase import KahiBase
from pymongo import MongoClient
from datetime import datetime as dt
from time import time
from langid import classify
class Kahi_scienti_sources(KahiBase):
config = {}
def __init__(self, config):
self.config = config
self.mongodb_url = config["database_url"]
self.client = MongoClient(self.mongodb_url)
self.db = self.client[config["database_name"]]
self.collection = self.db["sources"]
self.collection.create_index("external_ids.id")
self.already_in_db = []
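    # Hedged config sketch (not part of the original plugin): the keys read in
    # __init__() and run()/process_scienti() suggest a config shape like
    #
    #     config = {
    #         "database_url": "mongodb://localhost:27017/",   # illustrative URL
    #         "database_name": "kahi",                        # illustrative name
    #         "scienti_sources": [
    #             {"database_url": "mongodb://localhost:27017/",
    #              "database_name": "scienti_db",
    #              "collection_name": "products"},
    #         ],
    #     }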
def update_scienti(self, reg, entry, issn):
updated_scienti = False
for upd in entry["updated"]:
if upd["source"] == "scienti":
updated_scienti = True
entry["updated"].remove(upd)
entry["updated"].append(
{"source": "scienti", "time": int(time())})
break
if not updated_scienti:
entry["updated"].append({"source": "scienti", "time": int(time())})
journal = None
for detail in reg["details"]:
if "article" in detail.keys():
paper = detail["article"][0]
if "journal" in paper.keys():
journal = paper["journal"][0]
break
if not journal:
return
if "TPO_REVISTA" in journal.keys():
entry["types"].append(
{"source": "scienti", "type": journal["TPO_REVISTA"]})
entry["external_ids"].append(
{"source": "scienti", "id": journal["COD_REVISTA"]})
rankings_list = []
ranks = []
dates = [(rank["from_date"], rank["to_date"])
for rank in entry["ranking"] if rank["source"] == "scienti"]
for reg_scienti in self.scienti_collection["products"].find({"details.article.journal.TXT_ISSN_SEP": issn}):
paper = None
journal = None
for detail in reg_scienti["details"]:
if "article" in detail.keys():
paper = detail["article"][0]
if "journal" in paper.keys():
journal = paper["journal"][0]
break
if "TPO_CLASIFICACION" not in journal.keys():
continue
if not journal["TPO_CLASIFICACION"] in ranks:
ranking = {
"from_date": int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()),
"to_date": int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()),
"rank": journal["TPO_CLASIFICACION"],
"issn": issn,
"order": None,
"source": "scienti"
}
rankings_list.append(ranking)
ranks.append(journal["TPO_CLASIFICACION"])
dates_tuple = (
int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()),
int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
)
dates.append(dates_tuple)
else:
idx = ranks.index(journal["TPO_CLASIFICACION"])
date1, date2 = dates[idx]
if date1 > int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()):
date1 = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
if date2 < int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()):
date2 = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
dates[idx] = (date1, date2)
self.collection.update_one({"_id": entry["_id"]}, {"$set": {
"types": entry["types"],
"external_ids": entry["external_ids"],
"updated": entry["updated"],
"ranking": entry["ranking"] + rankings_list
}})
def process_scienti(self, config, verbose=0):
self.scienti_client = MongoClient(config["database_url"])
if config["database_name"] not in self.scienti_client.list_database_names():
raise Exception("Database {} not found".format(
config["database_name"]))
self.scienti_db = self.scienti_client[config["database_name"]]
if config["collection_name"] not in self.scienti_db.list_collection_names():
raise Exception("Collection {} not found".format(
config["collection_name"]))
self.scienti_collection = self.scienti_db[config["collection_name"]]
for issn in self.scienti_collection.distinct("details.article.journal.TXT_ISSN_SEP"):
reg_db = self.collection.find_one({"external_ids.id": issn})
if reg_db:
reg_scienti = self.scienti_collection.find_one(
{"details.article.journal.TXT_ISSN_SEP": issn})
if reg_scienti:
self.update_scienti(reg_scienti, reg_db, issn)
else:
reg_scienti = self.scienti_collection.find_one(
{"details.article.journal.TXT_ISSN_SEP": issn})
if reg_scienti:
journal = None
for detail in reg_scienti["details"]:
if "article" in detail.keys():
paper = detail["article"][0]
if "journal" in paper.keys():
journal = paper["journal"][0]
break
if not journal:
continue
entry = self.empty_source()
entry["updated"] = [
{"source": "scienti", "time": int(time())}]
lang = classify(journal["TXT_NME_REVISTA"])[0]
entry["names"] = [
{"lang": lang, "name": journal["TXT_NME_REVISTA"], "source": "scienti"}]
entry["external_ids"].append(
{"source": "issn", "id": journal["TXT_ISSN_SEP"]})
entry["external_ids"].append(
{"source": "scienti", "id": journal["COD_REVISTA"]})
if "TPO_REVISTA" in journal.keys():
entry["types"].append(
{"source": "scienti", "type": journal["TPO_REVISTA"]})
if "editorial" in journal.keys():
entry["publisher"] = {
"country_code": "", "name": journal["editorial"][0]["TXT_NME_EDITORIAL"]}
rankings_list = []
ranks = []
dates = []
for reg_scienti in self.scienti_collection.find({"details.article.journal.TXT_ISSN_SEP": issn}):
paper = None
journal = None
for detail in reg_scienti["details"]:
if "article" in detail.keys():
paper = detail["article"][0]
if "journal" in paper.keys():
journal = paper["journal"][0]
break
if "TPO_CLASIFICACION" not in journal.keys():
continue
if not journal["TPO_CLASIFICACION"] in ranks:
try:
from_date = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
to_date = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
except Exception as e:
print(e)
try:
from_date = int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp())
to_date = int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp())
except Exception as e:
print(e)
from_date = None
to_date = None
ranking = {
"from_date": from_date,
"to_date": to_date,
"rank": journal["TPO_CLASIFICACION"],
"issn": issn,
"order": None,
"source": "scienti"
}
rankings_list.append(ranking)
ranks.append(journal["TPO_CLASIFICACION"])
try:
dates_tuple = (
int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()),
int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
)
except Exception as e:
print(e)
try:
dates_tuple = (
int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp()),
int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp())
)
except Exception as e:
print(e)
dates_tuple = (
None,
None
)
dates.append(dates_tuple)
else:
                            # if it is already ranked but the dates changed
idx = ranks.index(journal["TPO_CLASIFICACION"])
date1, date2 = dates[idx]
try:
if date1 > int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()):
date1 = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
if date2 < int(dt.strptime(paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp()):
date2 = int(dt.strptime(
paper["DTA_CREACION"], "%a, %d %b %Y %H:%M:%S %Z").timestamp())
except Exception as e:
print(e)
try:
if date1 > int(dt.strptime(paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp()):
date1 = int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp())
if date2 < int(dt.strptime(paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp()):
date2 = int(dt.strptime(
paper["DTA_CREACION"], "%Y-%m-%d %H:%M:%S").timestamp())
except Exception as e:
print(e)
dates[idx] = (date1, date2)
entry["ranking"] = rankings_list
self.collection.insert_one(entry)
def run(self):
for config in self.config["scienti_sources"]:
print("Processing {} database".format(config["database_name"]))
self.process_scienti(config, verbose=5)
return 0 | PypiClean |
/AlgorithmLib-4.0.3.tar.gz/AlgorithmLib-4.0.3/algorithmLib/AEC_EVALUATION/ERLE_ETSIMATION.py | import sys,os
from os import path
sys.path.append('../')
from ctypes import *
from commFunction import emxArray_real_T,get_data_of_ctypes_
import ctypes
import platform
# * -------------------------------------------------------------------------
# * Arguments : const emxArray_real_T *sig_mic
# * const emxArray_real_T *sig_far
# * const emxArray_real_T *sig_ref
# * double fs_mic
# * double fs_far
# * double type
# * double *ERLE
# * double *output_std
# * double *residual_avgdB
# * double *err
# * Return Type : void
# */
# void ERLE_estimation(const emxArray_real_T *sig_mic, const emxArray_real_T
# *sig_far, const emxArray_real_T *sig_ref, double fs_mic,
# double fs_far, double type, double *ERLE, double
# *output_std, double *residual_avgdB, double *err)
def cal_erle(micFile=None, testFile=None, refFile=None, targetType=0):
    """
    Estimate ERLE for an AEC-processed recording.
    targetType - input signal type:
        0: Chinese
        1: English
        2: Single Digit
        3: Music
    Parameters
    ----------
    micFile : path of the microphone recording (passed to ERLE_estimation as sig_mic)
    testFile : path of the recording under test (passed as sig_far)
    refFile : path of the reference recording (passed as sig_ref)
    targetType : input signal type, see the mapping above
    Returns
    -------
    (ERLE, output_std, residual_avgdB)
    """
instruct,insamplerate,_ = get_data_of_ctypes_(micFile,True)
teststruct,outsamplerate,_ = get_data_of_ctypes_(testFile,True)
refstruct, refsamplerate, _ = get_data_of_ctypes_(refFile,True)
# if refsamplerate != testsamplerate :
# raise TypeError('Different format of ref and test files!')
mydll = None
cur_paltform = platform.platform().split('-')[0]
if cur_paltform == 'Windows':
mydll = ctypes.windll.LoadLibrary(sys.prefix + '/ERLE_estimation.dll')
if cur_paltform == 'macOS':
mydll = CDLL(sys.prefix + '/ERLE_estimation.dylib')
if cur_paltform == 'Linux':
mydll = CDLL(sys.prefix + '/ERLE_estimation.so')
mydll.ERLE_estimation.argtypes = [POINTER(emxArray_real_T),POINTER(emxArray_real_T),POINTER(emxArray_real_T),c_double,c_double,c_double,POINTER(c_double),POINTER(c_double),POINTER(c_double),POINTER(c_double)]
data_format = c_double*11
gain_table = data_format()
DR = data_format()
ERLE,output_std,err,residual_avgdB = c_double(0.0),c_double(0.0),c_double(0.0),c_double(0.0)
mydll.ERLE_estimation(byref(instruct),byref(teststruct),byref(refstruct),c_double(insamplerate),c_double(outsamplerate),c_double(targetType),byref(ERLE),byref(output_std),byref(residual_avgdB),byref(err))
print(err.value)
print(ERLE.value,output_std.value,residual_avgdB.value)
#if err.value == 0.0:
return ERLE.value,output_std.value,residual_avgdB.value
# else:
# return None,None,None
if __name__ == '__main__':
import platform
print(platform.platform().split('-')[0])
# micfile = r'C:\Users\vcloud_avl\Documents\我的POPO\0\stdRefFile.wav'
# test = r'C:\Users\vcloud_avl\Documents\我的POPO\0\mixDstFile.wav'
# ref = R'C:\Users\vcloud_avl\Documents\我的POPO\0\ref_cn.wav'
micfile = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\stdRefFile.wav'
test = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\mixDstFile.wav'
ref = r'D:\MARTIN\audiotestalgorithm-master\algorithmLib\AEC_EVALUATION\agoraTestCase_03_None_None\agora_near\0\ref_cn.wav'
ERLE,output_std,residual_avgdB = cal_erle(micFile=micfile,testFile=test,refFile=ref,targetType=0)
print('ERLE:{},output_std:{},residual_avgdB:{}'.format(ERLE,output_std,residual_avgdB)) | PypiClean |
/M5-0.3.2.tar.gz/M5-0.3.2/lib/scottp-scrollability/scrollability.js | (function() {
// Number of pixels finger must move to determine horizontal or vertical motion
var kLockThreshold = 10;
// Factor which reduces the length of motion by each move of the finger
var kTouchMultiplier = 1;
// Maximum velocity for motion after user releases finger
//var kMaxVelocity = 720 / (window.devicePixelRatio||1);
var kMaxVelocity = 250 / (window.devicePixelRatio||1); //scottp
// Rate of deceleration after user releases finger
//var kDecelRate = 350;
var kDecelRate = 650; //scottp
// Percentage of the page which content can be overscrolled before it must bounce back
var kBounceLimit = 0.5;
// Rate of deceleration when content has overscrolled and is slowing down before bouncing back
var kBounceDecelRate = 600;
// Duration of animation when bouncing back
var kBounceTime = 90;
// Percentage of viewport which must be scrolled past in order to snap to the next page
var kPageLimit = 0.3;
// Velocity at which the animation will advance to the next page
var kPageEscapeVelocity = 50;
// Vertical margin of scrollbar
var kScrollbarMargin = 2;
// Time to scroll to top
var kScrollToTopTime = 200;
var isWebkit = "webkitTransform" in document.documentElement.style;
var isFirefox = "MozTransform" in document.documentElement.style;
var isTouch = "ontouchstart" in window;
// ===============================================================================================
var startX, startY, touchX, touchY, touchDown, touchMoved, onScrollEvt, useOnScrollEvt, justChangedOrientation;
var animationInterval = 0;
var touchTargets = [];
var scrollers = {
'horizontal': createXTarget,
'vertical': createYTarget
};
window.scrollability = {
version: 'scottp-0.71',
globalScrolling: false,
scrollers: scrollers,
useOnScrollEvt: false,
flashIndicators: function() {
var scrollables = document.querySelectorAll('.scrollable.vertical');
for (var i = 0; i < scrollables.length; ++i) {
scrollability.scrollTo(scrollables[i], 0, 0, 20, true);
}
},
scrollToTop: function(animationTime) {
if (elt) {
scrollability.scrollTo(elt, 0, 0, animationTime);
} else {
var scrollables = document.getElementsByClassName('scrollable');
if (scrollables.length) {
var scrollable = scrollables[0];
if (scrollable.className.indexOf('vertical') != -1) {
scrollability.scrollTo(scrollable, 0, 0, animationTime || kScrollToTopTime);
}
}
}
},
scrollTo: function(element, x, y, animationTime, muteDelegate) {
stopAnimation();
var target = createTargetForElement(element);
if (target) {
if (muteDelegate) {
target.delegate = null;
}
target = wrapTarget(target);
touchTargets = [target];
touchMoved = true;
if (animationTime) {
var orig = element[target.key];
var dest = target.filter(x, y);
var dir = dest - orig;
var startTime = new Date().getTime();
animationInterval = setInterval(function() {
var d = new Date().getTime() - startTime;
var pos = orig + ((dest-orig) * (d/animationTime));
if ((dir < 0 && pos < dest) || (dir > 0 && pos > dest)) {
pos = dest;
}
target.updater(pos);
if (pos == dest) {
clearInterval(animationInterval);
setTimeout(stopAnimation, 200);
}
}, 20);
} else {
target.updater(y);
stopAnimation();
}
}
}
};
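    // Hedged usage sketch (not part of the original library): scrollTo takes the
    // .scrollable element itself, target x/y translate values (negative y moves the
    // content up, i.e. scrolls down), and an optional animation time in ms.
    //
    //     var list = document.querySelector('.scrollable.vertical');
    //     scrollability.scrollTo(list, 0, -300, 200);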
function onLoad() {
scrollability.flashIndicators();
}
function onScroll(event) {
setTimeout(function() {
if (justChangedOrientation) {
justChangedOrientation = false;
} else if (isTouch) {
scrollability.scrollToTop();
}
});
}
function onOrientationChange(event) {
justChangedOrientation = true;
window.scrollTo(0, 1); // scottp - I added this to force show of nav bar on orientation change
}
function onTouchStart(event) {
stopAnimation();
var touchCandidate = event.target;
var touch = event.touches[0];
var touched = null;
var startTime = new Date().getTime();
touchX = startX = touch.clientX;
touchY = startY = touch.clientY;
touchDown = true;
touchMoved = false;
touchTargets = getTouchTargets(event.target, touchX, touchY, startTime);
if (!touchTargets.length && !scrollability.globalScrolling) {
return true;
}
var holdTimeout = setTimeout(function() {
holdTimeout = 0;
touched = setTouched(touchCandidate);
}, 50);
var d = document;
d.addEventListener('touchmove', onTouchMove, false);
d.addEventListener('touchend', onTouchEnd, false);
animationInterval = setInterval(touchAnimation, 0);
function onTouchMove(event) {
event.preventDefault();
touchMoved = true;
if (holdTimeout) {
clearTimeout(holdTimeout);
holdTimeout = 0;
}
if (touched) {
releaseTouched(touched);
touched = null;
}
var touch = event.touches[0];
touchX = touch.clientX;
touchY = touch.clientY;
// Reduce the candidates down to the one whose axis follows the finger most closely
if (touchTargets.length > 1) {
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
if (target.disable && target.disable(touchX, touchY, startX, startY)) {
target.terminator();
touchTargets.splice(i, 1);
break;
}
}
}
try {
touchTargets[0].pullToRefresh();
} catch(e) {}
}
function onTouchEnd(event) {
if (holdTimeout) {
clearTimeout(holdTimeout);
holdTimeout = 0;
}
// Simulate a click event when releasing the finger
if (touched) {
var evt = document.createEvent('MouseEvents');
evt.initMouseEvent('click', true, true, window, 1);
touched[0].dispatchEvent(evt);
releaseTouched(touched);
} else {
try {
touchTargets[0].pullToRefreshRelease();
} catch(e) {}
}
d.removeEventListener('touchmove', onTouchMove, false);
d.removeEventListener('touchend', onTouchEnd, false);
touchDown = false;
}
}
function wrapTarget(target, startX, startY, startTime) {
var delegate = target.delegate;
var constrained = target.constrained;
var paginated = target.paginated;
var viewport = target.viewport || 0;
var scrollbar = target.scrollbar;
var position = target.node[target.key];
var min = target.min;
var max = target.max;
var absMin = min;
var absMax = Math.round(max/viewport)*viewport;
var pageSpacing = 0;
var velocity = 0;
var decelerating = 0;
var decelOrigin, decelDelta;
var bounceLimit = target.bounce;
var pageLimit = viewport * kPageLimit;
var lastTouch = startTouch = target.filter(startX, startY);
var lastTime = startTime;
var stillTime = 0;
var stillThreshold = 20;
var snapped = false;
var locked = false;
var isPullingUp = false;
var isPullingDown = false;
if (paginated) {
var excess = Math.round(Math.abs(absMin) % viewport);
var pageCount = ((Math.abs(absMin)-excess) / viewport)+1;
var pageSpacing = excess / pageCount;
var positionSpacing = Math.round(position) % viewport;
var pagePosition = Math.round((position-positionSpacing)/viewport) * viewport;
min = max = Math.round(pagePosition + absMax)+positionSpacing;
absMin += pageSpacing;
}
if (delegate && delegate.onStartScroll) {
if (!delegate.onStartScroll()) {
return null;
}
}
if (scrollbar) {
target.node.parentNode.appendChild(scrollbar);
}
function animator(touch, time) {
var deltaTime = 1 / (time - lastTime);
lastTime = time;
var continues = true;
if (touchDown) {
var delta = (touch - lastTouch) * kTouchMultiplier;
if (!delta) {
// Heuristics to prevent out delta=0 changes from making velocity=0 and
// stopping all motion in its tracks. We need to distinguish when the finger
// has actually stopped moving from when the timer fired too quickly.
if (!stillTime) {
stillTime = time;
}
if (time - stillTime < stillThreshold) {
return true;
}
} else {
stillTime = 0;
}
if (!locked && Math.abs(touch - startTouch) > kLockThreshold) {
locked = true;
if (delegate && delegate.onLockScroll) {
delegate.onLockScroll(target.key);
}
}
lastTouch = touch;
velocity = delta / deltaTime;
// Apply resistance along the edges
if (position > max && constrained) {
var excess = position - max;
velocity *= (1.0 - excess / bounceLimit);
} else if (position < min && constrained) {
var excess = min - position;
velocity *= (1.0 - excess / bounceLimit);
}
} else {
if (paginated && !snapped) {
// When finger is released, decide whether to jump to next/previous page
// or to snap back to the current page
snapped = true;
if (Math.abs(position - max) > pageLimit || Math.abs(velocity) > kPageEscapeVelocity) {
if (position > max) {
if (max != absMax) {
max += viewport+pageSpacing;
min += viewport+pageSpacing;
if (delegate && delegate.onScrollPage) {
var totalSpacing = min % viewport;
var page = -Math.round((position+viewport-totalSpacing)/viewport);
delegate.onScrollPage(page, -1);
}
}
} else {
if (min != absMin) {
max -= viewport+pageSpacing;
min -= viewport+pageSpacing;
if (delegate && delegate.onScrollPage) {
var totalSpacing = min % viewport;
var page = -Math.round((position-viewport-totalSpacing)/viewport);
delegate.onScrollPage(page, 1);
}
}
}
}
}
if (position > max && constrained) {
if (velocity > 0) {
// Slowing down
var excess = position - max;
var elasticity = (1.0 - excess / bounceLimit);
velocity = Math.max(velocity - kBounceDecelRate * deltaTime, 0) * elasticity;
decelerating = 0;
} else {
// Bouncing back
if (!decelerating) {
decelOrigin = position;
decelDelta = max - position;
}
position = easeOutExpo(decelerating, decelOrigin, decelDelta, kBounceTime);
return update(position, ++decelerating <= kBounceTime && Math.floor(position) > max);
}
} else if (position < min && constrained) {
if (velocity < 0) {
// Slowing down
var excess = min - position;
var elasticity = (1.0 - excess / bounceLimit);
velocity = Math.min(velocity + kBounceDecelRate * deltaTime, 0) * elasticity;
decelerating = 0;
} else {
// Bouncing back
if (!decelerating) {
decelOrigin = position;
decelDelta = min - position;
}
position = easeOutExpo(decelerating, decelOrigin, decelDelta, kBounceTime);
return update(position, ++decelerating <= kBounceTime && Math.ceil(position) < min);
}
} else {
// Slowing down
if (!decelerating) {
if (velocity < 0 && velocity < -kMaxVelocity) {
velocity = -kMaxVelocity;
} else if (velocity > 0 && velocity > kMaxVelocity) {
velocity = kMaxVelocity;
}
decelOrigin = velocity;
}
velocity = easeOutExpo(decelerating, decelOrigin, -decelOrigin, kDecelRate);
if (++decelerating > kDecelRate || Math.floor(velocity) == 0) {
continues = false;
}
}
}
position += velocity * deltaTime;
return update(position, continues);
}
function update(pos, continues) {
position = pos;
target.node[target.key] = position;
target.update(target.node, position);
if (delegate && delegate.onScroll) {
delegate.onScroll(position);
}
// Update the scrollbar
var range = -min - max;
if (scrollbar && (range + viewport) > viewport) {
var viewable = viewport - kScrollbarMargin*2;
var height = (viewable/(range+viewport)) * viewable;
var scrollPosition = 0;
if (position > max) {
height = Math.max(height - (position-max), 5);
scrollPosition = 0;
} else if (position < min) {
height = Math.max(height - (min - position), 5);
scrollPosition = (viewable-height);
} else {
scrollPosition = Math.round((Math.abs(position) / range) * (viewable-height));
}
scrollPosition += kScrollbarMargin;
scrollbar.style.height = Math.round(height) + 'px';
moveElement(scrollbar, 0, Math.round(scrollPosition));
if (touchMoved) {
scrollbar.style.webkitTransition = 'none';
scrollbar.style.opacity = '1';
}
}
return continues;
}
function terminator() {
// Snap to the integer endpoint, since position may be a subpixel value while animating
if (paginated) {
var pageIndex = Math.round(position/viewport);
update(pageIndex * (viewport+pageSpacing));
} else if (position > max && constrained) {
update(max);
} else if (position < min && constrained) {
update(min);
}
// Hide the scrollbar
if (scrollbar) {
scrollbar.style.opacity = '0';
scrollbar.style.webkitTransition = 'opacity 0.33s linear';
}
if (delegate && delegate.onEndScroll) {
delegate.onEndScroll();
}
}
function pullToRefresh(released) {
var pullUpMin = min - target.pullUpToRefresh.offsetHeight / 2;
var pullDownMin = max + target.pullDownToRefresh.offsetHeight;
var pullState;
return function() {
if (target.pullUpToRefresh || target.pullDownToRefresh) {
if ( !released &&
(
(isPullingDown && ((pullDownMin < position && pullState) || (pullDownMin > position && !pullState)))
||
(isPullingUp && ((position < pullUpMin && pullState) || (position > pullUpMin && !pullState)))
)
) {
return;
}
if (released && (position > pullDownMin)) {
pullState = 'pulledDown';
isPullingUp = false;
isPullingDown = false;
} else if (released && (position < pullUpMin)) {
pullState = 'pulledUp';
isPullingUp = false;
isPullingDown = false;
} else if (isPullingDown && (position < pullDownMin)) {
pullState = 'pullDownCancel';
isPullingUp = false;
isPullingDown = false;
} else if (isPullingUp && (position > pullUpMin)) {
pullState = 'pullUpCancel';
isPullingUp = false;
isPullingDown = false;
} else if (position > pullDownMin) {
pullState = 'pullingDown';
isPullingUp = false;
isPullingDown = true;
} else if (position < pullUpMin) {
pullState = 'pullingUp';
isPullingUp = true;
isPullingDown = false;
}
var evt = document.createEvent('Event');
evt.initEvent(pullState, true, false);
target.node.dispatchEvent(evt);
}
}
}
target.updater = update;
target.animator = animator;
target.terminator = terminator;
target.pullToRefresh = pullToRefresh(false);
target.pullToRefreshRelease = pullToRefresh(true);
return target;
}
function touchAnimation() {
var time = new Date().getTime();
// Animate each of the targets
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
// Translate the x/y touch into the value needed by each of the targets
var touch = target.filter(touchX, touchY);
if (!target.animator(touch, time)) {
target.terminator();
touchTargets.splice(i--, 1);
}
}
if (!touchTargets.length) {
stopAnimation();
}
}
// *************************************************************************************************
function getTouchTargets(node, touchX, touchY, startTime) {
var targets = [];
findTargets(node, targets, touchX, touchY, startTime);
var candidates = document.querySelectorAll('.scrollable.global');
for(var j = 0; j < candidates.length; ++j) {
findTargets(candidates[j], targets, touchX, touchY, startTime);
}
return targets;
}
function findTargets(element, targets, touchX, touchY, startTime) {
while (element) {
if (element.nodeType == 1) {
var target = createTargetForElement(element, touchX, touchY, startTime);
if (target) {
// Look out for duplicates
var exists = false;
for (var j = 0; j < targets.length; ++j) {
if (targets[j].node == element) {
exists = true;
break;
}
}
if (!exists) {
target = wrapTarget(target, touchX, touchY, startTime);
if (target) {
targets.push(target);
}
}
}
}
element = element.parentNode;
}
}
function createTargetForElement(element, touchX, touchY, startTime) {
var classes = element.className.split(' ');
for (var i = 0; i < classes.length; ++i) {
var name = classes[i];
if (scrollers[name]) {
var target = scrollers[name](element);
target.key = 'scrollable_'+name;
target.paginated = classes.indexOf('paginated') != -1;
if (!(target.key in element)) {
element[target.key] = target.initial ? target.initial(element) : 0;
}
return target;
}
}
}
function setTouched(target) {
var touched = [];
for (var n = target; n; n = n.parentNode) {
if (n.nodeType == 1) {
n.className = (n.className ? n.className + ' ' : '') + 'touched';
touched.push(n);
}
}
return touched;
}
function releaseTouched(touched) {
for (var i = 0; i < touched.length; ++i) {
var n = touched[i];
n.className = n.className.replace('touched', '');
}
}
function stopAnimation() {
if (animationInterval) {
clearInterval(animationInterval);
animationInterval = 0;
for (var i = 0; i < touchTargets.length; ++i) {
var target = touchTargets[i];
target.terminator();
}
touchTargets = [];
}
}
function moveElement(element, x, y) {
if (isWebkit) {
element.style.webkitTransform = 'translate3d('
+(x ? (x+'px') : '0')+','
+(y ? (y+'px') : '0')+','
+'0)';
} else if (isFirefox) {
element.style.MozTransform = 'translate3d('
+(x ? (x+'px') : '0')+','
+(y ? (y+'px') : '0')+')';
}
if(!onScrollEvt && useOnScrollEvt) {
onScrollEvt = setTimeout(function() {
var evt = document.createEvent('Event');
// Don't want this to bubble because of scrollToTop
evt.initEvent('scroll', false, false);
evt.x = -x || 0;
evt.y = -y || 0;
element.dispatchEvent(evt);
onScrollEvt = false;
}, 20);
}
}
function initScrollbar(element) {
if (!element.scrollableScrollbar) {
var scrollbar = element.scrollableScrollbar = document.createElement('div');
scrollbar.className = 'scrollableScrollbar';
// We hardcode this CSS here to avoid having to provide a CSS file
scrollbar.style.cssText = [
'position: absolute',
'top: 0',
'right: 2px',
'width: 5px',
'min-height: 4px',
'background: rgba(40, 40, 40, 0.6)',
'border: 1px solid rgba(235, 235, 235, 0.1)',
'opacity: 0',
'-webkit-border-radius: 4px 5px',
'-webkit-transform: translate3d(0,0,0)',
'-webkit-box-sizing: border-box',
'z-index: 2147483647'
].join(';');
}
return element.scrollableScrollbar;
}
function easeOutExpo(t, b, c, d) {
return (t==d) ? b+c : c * (-Math.pow(2, -10 * t/d) + 1) + b;
}
// *************************************************************************************************
function createXTarget(element) {
var parent = element.parentNode;
return {
node: element,
min: -parent.scrollWidth + parent.offsetWidth,
max: 0,
viewport: parent.offsetWidth,
bounce: parent.offsetWidth * kBounceLimit,
constrained: true,
delegate: element.scrollDelegate,
filter: function(x, y) {
return x;
},
disable: function (x, y, startX, startY) {
var dx = Math.abs(x - startX);
var dy = Math.abs(y - startY);
if (dy > dx && dy > kLockThreshold) {
return true;
}
},
update: function(element, position) {
moveElement(element, position, element.scrollable_vertical||0);
}
};
}
function createYTarget(element) {
var parent = element.parentNode,
            pullDownToRefresh = parent.getElementsByClassName('pull-down-to-refresh')[0],
pullUpToRefresh = parent.getElementsByClassName('pull-up-to-refresh')[0];
return {
node: element,
scrollbar: initScrollbar(element),
min: -parent.scrollHeight + parent.offsetHeight
+ (pullUpToRefresh ? pullUpToRefresh.offsetHeight : 0),
max: (pullDownToRefresh ? -pullDownToRefresh.offsetHeight : 0),
viewport: parent.offsetHeight,
bounce: parent.offsetHeight * kBounceLimit,
pullUpToRefresh: pullUpToRefresh ? pullUpToRefresh : false,
pullDownToRefresh: pullDownToRefresh ? pullDownToRefresh : false,
constrained: true,
delegate: element.scrollDelegate,
filter: function(x, y) {
return y;
},
disable: function(x, y, startX, startY) {
var dx = Math.abs(x - startX);
var dy = Math.abs(y - startY);
if (dx > dy && dx > kLockThreshold) {
return true;
}
},
update: function(element, position) {
moveElement(element, element.scrollable_horizontal||0, position);
}
};
}
document.addEventListener('touchstart', onTouchStart, false);
document.addEventListener('scroll', onScroll, false);
document.addEventListener('orientationchange', onOrientationChange, false);
window.addEventListener('load', onLoad, false);
})();
// convenience - scottp
scrollability.scrollToTop = function(elt) {
if (elt) {
scrollability.scrollTo(elt, 0, 0);
} else {
var scrollables = document.getElementsByClassName('scrollable');
if (scrollables.length) {
var scrollable = scrollables[0];
if (scrollable.className.indexOf('vertical') != -1) {
scrollability.scrollTo(scrollable, 0, 0, kScrollToTopTime);
}
}
}
}
# /Confluence-Task-Crawler-0.0.6.tar.gz/Confluence-Task-Crawler-0.0.6/ctr/Database/model.py
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import DateTime
from sqlalchemy import Date
from sqlalchemy import Boolean
from sqlalchemy import func, event
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from datetime import datetime, date as datetimedate
from ctr.Util import logger
Base = declarative_base()
def _extract_company_from_email(email: str):
"""
    Extracts the company name from an e-mail address. If the company name is at most 4 characters long it is
    rendered in upper case (e.g. "Ibm" looks weird, "IBM" looks better); otherwise the first character of the
    company name is capitalized.
    :param email: e-mail address to parse
    :return: company name, or "unknown" if no company can be derived
"""
if not email:
return "unknown"
if "@" not in email:
return "unknown"
# "[email protected]"
company = email.split("@")[1] # fritzi.com
company = company.split(".")[0] # fritzi
if len(company) <= 4:
        return company.upper()  # e.g. "ibm" -> "IBM"
    return company.title()  # e.g. "fritzi" -> "Fritzi"
class ModelDoku:
"""
This file holds the sqlalchemy-Model. Basically the tables and the fields and a bit of Alchemy.
"""
class User(Base):
__tablename__ = "conf_users"
id = Column(Integer, primary_key=True, autoincrement=True)
conf_name = Column(String(100), nullable=False)
conf_userkey = Column(String(100), nullable=False)
display_name = Column(String(100), nullable=True)
email = Column(String(255), nullable=True) # Might be filled in later!
last_crawled = Column(DateTime(), server_onupdate=func.now(), server_default=func.now())
tasks_last_crawled = Column(DateTime(), nullable=True)
company = Column(String(100), nullable=True, index=True)
def __repr__(self):
return f"User(id={self.id!r}, Name={self.conf_name!r} E-Mail={self.email!r}"
def __init__(self, conf_name, conf_userkey, email, display_name, last_crawled=None, company="unknown"):
self.conf_name = conf_name
self.conf_userkey = conf_userkey
# Needs to be before email so that it will be overwritten with proper value
self.company = company
self.email = email
self.display_name = display_name
self.last_crawled = last_crawled
@event.listens_for(User.email, "set")
def update_User_email(target, value, oldvalue, initiator):
"""
Listens to changes to "User.email" and writes company-field from these changes.
:param target: the User-class
:param value: the new value of User.email
:param oldvalue: The previous value. Not used so far.
:param initiator: not used so far.
:return:
"""
target.company = _extract_company_from_email(value)
return target
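# Hedged sketch (not part of the original module): because of the "set" listener above,
# assigning User.email derives User.company automatically. Values below are illustrative only.
#   user = User(conf_name="jdoe", conf_userkey="k1", email=None, display_name="J. Doe")
#   user.email = "[email protected]"     # -> user.company == "ACME"    (<= 4 chars, upper-cased)
#   user.email = "[email protected]"  # -> user.company == "Example" (title-cased)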
class Task(Base):
__tablename__ = "tasks"
internal_id = Column(Integer, primary_key=True, autoincrement=True)
global_id = Column(String(30), nullable=False) # GlobalID from Confluence. This is a bit weird as it's not
# supplied in tasks in pages but only in taskview-API.
task_id = Column(Integer, nullable=False) # TaskId on the current page
reminder_date = Column(Date, nullable=True) # The first date that can be found in a task
second_date = Column(Date, nullable=True) # The second date that can be found in a task
due_date = Column(Date, nullable=True) # Calculated due_date
is_done = Column(Boolean, nullable=False) # Task incomplete or completed
first_seen = Column(DateTime(), default=func.now())
last_crawled = Column(DateTime(), onupdate=func.now(), nullable=True, index=True)
    # The HTML task description from the wiki page
task_description = Column(String(), nullable=True)
    # Link to the user who owns the task. Confluence considers only the first mentioned user
    # as the assignee; all other mentioned names are informational only.
user_id = Column(Integer, ForeignKey("conf_users.id"), nullable=True)
user = relationship("User", backref="tasks")
# Link to the page, where this task can be found
page_link = Column(Integer, ForeignKey("pages.internal_id"), nullable=False)
page = relationship("Page", backref="tasks")
def __init__(self, global_id=global_id):
self.global_id = global_id
def __repr__(self):
return f"Int.ID={self.internal_id!r}, global_id={self.global_id!r}"
@hybrid_property
def age(self):
# This is processed during single record operations
x = (datetimedate.today() - self.due_date)
return x
@age.expression
def age(cls):
# FIXME: Works only on SQLITE-DB
# This is processed during queries with more than one entry
return func.julianday("now") - func.julianday(cls.due_date)
@event.listens_for(Task.second_date, "set")
def update_due_date_from_second_date(target: Task, value, oldvalue, initiator):
"""
If second date (=value) is lower than reminder date then this is the due_date.
If second date (=value) is not filled set the due_date to reminder_date (=1st date)
:param target:
:param value:
:param oldvalue:
:param initiator:
:return:
"""
if not target.reminder_date and value:
target.due_date = value
return target
if value:
try:
if value < target.reminder_date:
target.due_date = value
return target
except TypeError:
logger.critical(f"Received {value}. Target.reminder_date was {target.reminder_date}. Didn't do anything.")
return target
if not value:
target.due_date = target.reminder_date
return target
@event.listens_for(Task.reminder_date, "set")
def update_due_date_from_reminder_date(target: Task, value, oldvalue, initiator):
"""
    Due date is the second date if the reminder date (=value) is later than it; otherwise it is the reminder date.
:param target: the Task-Instance
:param value: the new value of Task.reminder_date
:param oldvalue:
:param initiator:
:return: the Task-Instance
"""
if target.second_date:
        # fall back to an early sentinel date when no reminder_date (=value) is given
        if (value or datetimedate(year=1972, month=1, day=1)) > target.second_date:
target.due_date = target.second_date
return target
target.due_date = value
return target
target.due_date = value
return target
class Page(Base):
__tablename__ = "pages"
internal_id = Column(Integer, primary_key=True, autoincrement=True)
page_link = Column(String(100), nullable=False) # Link to the page
page_name = Column(String(200), nullable=False) # Name/Title of the page
page_id = Column(Integer, nullable=True, unique=True) # The unique Confluence PageID
space = Column(String(50), nullable=True, index=True) # True because Space is not known during initial creation.
last_crawled = Column(DateTime, onupdate=func.now(), nullable=False, index=True)
def __init__(self, page_link, page_name):
self.page_link = page_link
self.page_name = page_name
self.last_crawled = datetime.now()
def __repr__(self):
return f'Name: {self.page_name!r}, ID: {self.page_id}'
class Statistics(Base):
__tablename__ = "stats"
id = Column(Integer, primary_key=True, autoincrement=True)
stat_date = Column(Date, nullable=False)
space = Column(String(50), nullable=False)
user_id = Column(Integer, ForeignKey("conf_users.id"), nullable=True)
user = relationship("User", backref="stats")
overdue = Column(Integer, nullable=True)
total = Column(Integer, nullable=False)
def __init__(self, space, date, user_id, overdue, total):
self.space = space
self.stat_date = date
self.user_id = user_id
self.overdue = overdue
self.total = total
class CreateTableStructures:
"""
Create the Tables in the database, if not already there.
Existing tables are not updated. New Tables are created.
"""
def __init__(self, engine):
self.engine = engine
def create_table_structures(self):
        Base.metadata.create_all(self.engine)
# /AQoPA-0.9.5.tar.gz/AQoPA-0.9.5/aqopa/module/greenanalysis/__init__.py
from aqopa import module
from .gui import ModuleGui
from aqopa.simulator.state import HOOK_TYPE_SIMULATION_FINISHED
from .console import PrintResultsHook
class Module(module.Module):
def __init__(self, energyanalysis_module):
self.energyanalysis_module = energyanalysis_module
self.carbon_dioxide_emissions = {}
self.pounds_of_co2_per_kWh = 0
def get_pounds_of_co2_per_kWh(self):
return self.pounds_of_co2_per_kWh
def set_pounds_of_co2_per_kWh(self, pounds_of_co2_per_kWh):
self.pounds_of_co2_per_kWh = pounds_of_co2_per_kWh
def get_gui(self):
if not getattr(self, '__gui', None):
setattr(self, '__gui', ModuleGui(self))
return getattr(self, '__gui', None)
def _install(self, simulator):
"""
"""
return simulator
def install_console(self, simulator):
""" Install module for console simulation """
self._install(simulator)
hook = PrintResultsHook(self, simulator)
simulator.register_hook(HOOK_TYPE_SIMULATION_FINISHED, hook)
return simulator
def install_gui(self, simulator):
""" Install module for gui simulation """
self._install(simulator)
return simulator
def __convert_to_joules(self, millijoules):
return millijoules / 1000.0
def __convert_to_kWh(self, joules):
return joules / 3600000.0
def calculate_emission(self, consumed_joules, pounds_of_co2_per_kWh):
kWhs = self.__convert_to_kWh(consumed_joules)
pounds = kWhs * pounds_of_co2_per_kWh
return pounds
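    # Hedged sketch (not part of the original module): the arithmetic above in numbers.
    # 3,600,000 J is exactly 1 kWh, so with an assumed emission factor of 0.92 lbs CO2/kWh:
    #   calculate_emission(3600000, 0.92) -> 0.92   (pounds of CO2)
    #   calculate_emission(1800000, 0.92) -> 0.46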
def calculate_emission_for_host(self, simulator, host, pounds_of_co2_per_kWh):
all_consumptions = self.get_all_hosts_consumption(simulator)
joules = all_consumptions[host]['energy']
pounds_for_host = self.calculate_emission(joules, pounds_of_co2_per_kWh)
return pounds_for_host
def calculate_all_emissions(self, simulator, hosts, pounds_of_co2_per_kWh):
all_emissions = {}
for host in hosts:
all_emissions[host] = self.calculate_emission_for_host(simulator, host, pounds_of_co2_per_kWh)
self.add_co2_emission(simulator, host, all_emissions[host])
return all_emissions
def add_co2_emission(self, simulator, host, co2_emission):
# add a new simulator if not available yet
if simulator not in self.carbon_dioxide_emissions:
self.carbon_dioxide_emissions[simulator] = {}
# add a new host if not available yet
if host not in self.carbon_dioxide_emissions[simulator]:
self.carbon_dioxide_emissions[simulator][host] = []
# add he amount of released carbon dioxide for the
# host - but only if we have not added it yet and
# if it is not 'empty'
if co2_emission not in self.carbon_dioxide_emissions[simulator][host] and co2_emission:
self.carbon_dioxide_emissions[simulator][host].append(co2_emission)
def get_min_emission(self, simulator, hosts):
host = hosts[0]
min_cost = self.carbon_dioxide_emissions[simulator][hosts[0]]
if len(min_cost) > 0:
for h in hosts:
if self.carbon_dioxide_emissions[simulator][h] < min_cost:
min_cost = self.carbon_dioxide_emissions[simulator][h]
host = h
return min_cost[0], host
else:
return 0, None
def get_max_emission(self, simulator, hosts):
host = hosts[0]
max_cost = self.carbon_dioxide_emissions[simulator][hosts[0]]
if len(max_cost) > 0:
for h in hosts:
if self.carbon_dioxide_emissions[simulator][h] > max_cost:
max_cost = self.carbon_dioxide_emissions[simulator][h]
host = h
return max_cost[0], host
        else:
return 0, None
def get_avg_emission(self, simulator, hosts):
cost_sum = 0.0
i = 0
for host in hosts:
for cost in self.carbon_dioxide_emissions[simulator][host]:
cost_sum += cost
i += 1
if i != 0:
return cost_sum / i
else:
return 0
def get_total_emission(self, simulator, hosts):
cost_sum = 0.0
for host in hosts:
for cost in self.carbon_dioxide_emissions[simulator][host]:
cost_sum += cost
return cost_sum
def get_all_emissions(self, simulator):
if simulator not in self.carbon_dioxide_emissions:
return []
return self.carbon_dioxide_emissions[simulator]
def get_all_hosts_consumption(self, simulator):
hosts = simulator.context.hosts
voltage = self.energyanalysis_module.get_voltage()
consumptions = self.energyanalysis_module.get_hosts_consumptions(simulator, hosts, voltage)
        return consumptions
# /JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/base/time/Time.py
from JumpScale import j
import time
import struct
class Time:
"""
generic provider of time functions
lives at j.base.time
"""
def getTimeEpoch(self):
'''
Get epoch timestamp (number of seconds passed since January 1, 1970)
'''
try:
return j.core.appserver6.runningAppserver.webserver.epoch #@todo P3 (check if working)
except:
pass
timestamp = int(time.time())
return timestamp
def getTimeEpochBin(self):
'''
        Get epoch timestamp (number of seconds passed since January 1, 1970), packed as a 4-byte little-endian binary string
'''
return struct.pack("<I",self.getTimeEpoch())
def getLocalTimeHR(self):
'''Get the current local date and time in a human-readable form'''
#timestamp = time.asctime(time.localtime(time.time()))
timestr=self.formatTime(self.getTimeEpoch())
return timestr
def getLocalTimeHRForFilesystem(self):
#@todo check if correct implementation
return time.strftime("%d_%b_%Y_%H_%M_%S", time.gmtime())
def formatTime(self,epoch,formatstr='%Y/%m/%d %H:%M:%S',local=True):
'''
Returns a formatted time string representing the current time
See http://docs.python.org/lib/module-time.html#l2h-2826 for an
overview of available formatting options.
@param format: Format string
@type format: string
@returns: Formatted current time
@rtype: string
'''
epoch=float(epoch)
if local:
timetuple=time.localtime(epoch)
else:
timetuple=time.gmtime(epoch)
timestr=time.strftime(formatstr,timetuple)
return timestr
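    # Hedged sketch (not part of the original class): formatTime() with the default and a custom format.
    #   formatTime(0, local=False)             -> '1970/01/01 00:00:00'
    #   formatTime(0, '%Y-%m-%d', local=False) -> '1970-01-01'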
def epoch2HRDate(self,epoch,local=True):
return self.formatTime(epoch,'%Y/%m/%d',local)
def epoch2HRDateTime(self,epoch,local=True):
return self.formatTime(epoch,'%Y/%m/%d %H:%M:%S',local)
def epoch2HRTime(self,epoch,local=True):
return self.formatTime(epoch,'%H:%M:%S',local)
def getMinuteId(self,epoch=None):
"""
is # min from jan 1 2010
"""
if epoch==None:
epoch=time.time()
if epoch<1262318400.0:
raise RuntimeError("epoch cannot be smaller than 1262318400, given epoch:%s"%epoch)
return int((epoch-1262318400.0)/60.0)
def getHourId(self,epoch=None):
"""
is # hour from jan 1 2010
"""
return int(self.getMinuteId(epoch)/60)
def fiveMinuteIdToEpoch(self,fiveMinuteId):
return fiveMinuteId*60*5+1262318400
def get5MinuteId(self,epoch=None):
"""
is # 5 min from jan 1 2010
"""
return int(self.getMinuteId(epoch)/5)
def getDayId(self,epoch=None):
"""
is # day from jan 1 2010
"""
return int(self.getMinuteId(epoch)/(60*24))
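    # Hedged sketch (not part of the original class): how the id helpers above relate,
    # using the reference epoch 1262318400 hard-coded in getMinuteId():
    #   getMinuteId(1262318400 + 150)  -> 2           (150 s = 2.5 min, truncated)
    #   get5MinuteId(1262318400 + 150) -> 0
    #   getDayId(1262318400 + 90000)   -> 1           (90000 s = 25 h)
    #   fiveMinuteIdToEpoch(1)         -> 1262318700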
def getEpochAgo(self,txt):
"""
        only supported now is -3d and -3h (of course 3 can be any int),
        meaning 3 days ago / 3 hours ago
        an int is just returned unchanged
        if txt==None or 0 the result is 1 day ago
"""
if txt==None or str(txt).strip()=="0":
return self.getEpochAgo("-1d")
if j.basetype.string.check(txt):
txt=txt.lower()
if txt.find("-")==-1:
raise RuntimeError("Cannot find time, needs to be in format -3d and -3h (ofcourse 3 can be any int)")
            if txt.find("d")!=-1:
ago=int(txt.replace("d","").replace("-",""))
return self.getTimeEpoch()-(ago*60*60*24)
            if txt.find("h")!=-1:
ago=int(txt.replace("h","").replace("-",""))
return self.getTimeEpoch()-(ago*60*60)
raise RuntimeError("Cannot find time, needs to be in format -3d and -3h (ofcourse 3 can be any int)")
else:
return int(txt)
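    # Hedged sketch (not part of the original class): examples of the accepted inputs.
    #   getEpochAgo("-2h")       -> getTimeEpoch() - 7200
    #   getEpochAgo("-3d")       -> getTimeEpoch() - 259200
    #   getEpochAgo(None)        -> getTimeEpoch() - 86400   (defaults to 1 day ago)
    #   getEpochAgo(1300000000)  -> 1300000000               (plain ints pass through)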
def getEpochFuture(self,txt):
"""
        only supported now is +3d and +3h (of course 3 can be any int)
        +3d means 3 days in the future
        an int is just returned unchanged
        if txt==None or 0 the current epoch is returned
        """
        if txt==None or str(txt).strip()=="0":
            return self.getTimeEpoch()
        if j.basetype.string.check(txt):
            txt=txt.lower()
            if txt.find("+")==-1:
                raise RuntimeError("Cannot find time, needs to be in format +3d and +3h (ofcourse 3 can be any int)")
            if txt.find("d")!=-1:
                ahead=int(txt.replace("d","").replace("+",""))
                return self.getTimeEpoch()+(ahead*60*60*24)
            if txt.find("h")!=-1:
                ahead=int(txt.replace("h","").replace("+",""))
                return self.getTimeEpoch()+(ahead*60*60)
raise RuntimeError("Cannot find time, needs to be in format +3d and +3h (ofcourse 3 can be any int)")
else:
return int(txt)
def HRDatetoEpoch(self,datestr,local=True):
"""
convert string date to epoch
Date needs to be formatted as 16/06/1988
"""
if datestr.strip()=="":
return 0
try:
datestr=datestr.strip()
return time.mktime(time.strptime(datestr, "%d/%m/%Y"))
except:
            raise ValueError ("Date needs to be formatted as \"16/06/1981\", also check if date is valid, now format = %s" % datestr)
/Flask-KQMaps-0.4.2.tar.gz/Flask-KQMaps-0.4.2/flask_kqmaps/static/kqwebclient/leaflet/3rd_libs/leaflet.markercluster/leaflet.markercluster.js | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e.Leaflet=e.Leaflet||{},e.Leaflet.markercluster=e.Leaflet.markercluster||{}))}(this,function(e){"use strict";var t=L.MarkerClusterGroup=L.FeatureGroup.extend({options:{maxClusterRadius:80,iconCreateFunction:null,clusterPane:L.Marker.prototype.options.pane,spiderfyOnMaxZoom:!0,showCoverageOnHover:!0,zoomToBoundsOnClick:!0,singleMarkerMode:!1,disableClusteringAtZoom:null,removeOutsideVisibleBounds:!0,animate:!0,animateAddingMarkers:!1,spiderfyDistanceMultiplier:1,spiderLegPolylineOptions:{weight:1.5,color:"#222",opacity:.5},chunkedLoading:!1,chunkInterval:200,chunkDelay:50,chunkProgress:null,polygonOptions:{}},initialize:function(e){L.Util.setOptions(this,e),this.options.iconCreateFunction||(this.options.iconCreateFunction=this._defaultIconCreateFunction),this._featureGroup=L.featureGroup(),this._featureGroup.addEventParent(this),this._nonPointGroup=L.featureGroup(),this._nonPointGroup.addEventParent(this),this._inZoomAnimation=0,this._needsClustering=[],this._needsRemoving=[],this._currentShownBounds=null,this._queue=[],this._childMarkerEventHandlers={dragstart:this._childMarkerDragStart,move:this._childMarkerMoved,dragend:this._childMarkerDragEnd};var t=L.DomUtil.TRANSITION&&this.options.animate;L.extend(this,t?this._withAnimation:this._noAnimation),this._markerCluster=t?L.MarkerCluster:L.MarkerClusterNonAnimated},addLayer:function(e){if(e instanceof L.LayerGroup)return this.addLayers([e]);if(!e.getLatLng)return this._nonPointGroup.addLayer(e),this.fire("layeradd",{layer:e}),this;if(!this._map)return this._needsClustering.push(e),this.fire("layeradd",{layer:e}),this;if(this.hasLayer(e))return this;this._unspiderfy&&this._unspiderfy(),this._addLayer(e,this._maxZoom),this.fire("layeradd",{layer:e}),this._topClusterLevel._recalculateBounds(),this._refreshClustersIcons();var t=e,i=this._zoom;if(e.__parent)for(;t.__parent._zoom>=i;)t=t.__parent;return this._currentShownBounds.contains(t.getLatLng())&&(this.options.animateAddingMarkers?this._animationAddLayer(e,t):this._animationAddLayerNonAnimated(e,t)),this},removeLayer:function(e){return e instanceof L.LayerGroup?this.removeLayers([e]):e.getLatLng?this._map?e.__parent?(this._unspiderfy&&(this._unspiderfy(),this._unspiderfyLayer(e)),this._removeLayer(e,!0),this.fire("layerremove",{layer:e}),this._topClusterLevel._recalculateBounds(),this._refreshClustersIcons(),e.off(this._childMarkerEventHandlers,this),this._featureGroup.hasLayer(e)&&(this._featureGroup.removeLayer(e),e.clusterShow&&e.clusterShow()),this):this:(!this._arraySplice(this._needsClustering,e)&&this.hasLayer(e)&&this._needsRemoving.push({layer:e,latlng:e._latlng}),this.fire("layerremove",{layer:e}),this):(this._nonPointGroup.removeLayer(e),this.fire("layerremove",{layer:e}),this)},addLayers:function(e,t){if(!L.Util.isArray(e))return this.addLayer(e);var i,n=this._featureGroup,r=this._nonPointGroup,s=this.options.chunkedLoading,o=this.options.chunkInterval,a=this.options.chunkProgress,h=e.length,l=0,u=!0;if(this._map){var _=(new Date).getTime(),d=L.bind(function(){for(var c=(new Date).getTime();h>l;l++){if(s&&0===l%200){var p=(new Date).getTime()-c;if(p>o)break}if(i=e[l],i instanceof L.LayerGroup)u&&(e=e.slice(),u=!1),this._extractNonGroupLayers(i,e),h=e.length;else 
if(i.getLatLng){if(!this.hasLayer(i)&&(this._addLayer(i,this._maxZoom),t||this.fire("layeradd",{layer:i}),i.__parent&&2===i.__parent.getChildCount())){var f=i.__parent.getAllChildMarkers(),m=f[0]===i?f[1]:f[0];n.removeLayer(m)}}else r.addLayer(i),t||this.fire("layeradd",{layer:i})}a&&a(l,h,(new Date).getTime()-_),l===h?(this._topClusterLevel._recalculateBounds(),this._refreshClustersIcons(),this._topClusterLevel._recursivelyAddChildrenToMap(null,this._zoom,this._currentShownBounds)):setTimeout(d,this.options.chunkDelay)},this);d()}else for(var c=this._needsClustering;h>l;l++)i=e[l],i instanceof L.LayerGroup?(u&&(e=e.slice(),u=!1),this._extractNonGroupLayers(i,e),h=e.length):i.getLatLng?this.hasLayer(i)||c.push(i):r.addLayer(i);return this},removeLayers:function(e){var t,i,n=e.length,r=this._featureGroup,s=this._nonPointGroup,o=!0;if(!this._map){for(t=0;n>t;t++)i=e[t],i instanceof L.LayerGroup?(o&&(e=e.slice(),o=!1),this._extractNonGroupLayers(i,e),n=e.length):(this._arraySplice(this._needsClustering,i),s.removeLayer(i),this.hasLayer(i)&&this._needsRemoving.push({layer:i,latlng:i._latlng}),this.fire("layerremove",{layer:i}));return this}if(this._unspiderfy){this._unspiderfy();var a=e.slice(),h=n;for(t=0;h>t;t++)i=a[t],i instanceof L.LayerGroup?(this._extractNonGroupLayers(i,a),h=a.length):this._unspiderfyLayer(i)}for(t=0;n>t;t++)i=e[t],i instanceof L.LayerGroup?(o&&(e=e.slice(),o=!1),this._extractNonGroupLayers(i,e),n=e.length):i.__parent?(this._removeLayer(i,!0,!0),this.fire("layerremove",{layer:i}),r.hasLayer(i)&&(r.removeLayer(i),i.clusterShow&&i.clusterShow())):(s.removeLayer(i),this.fire("layerremove",{layer:i}));return this._topClusterLevel._recalculateBounds(),this._refreshClustersIcons(),this._topClusterLevel._recursivelyAddChildrenToMap(null,this._zoom,this._currentShownBounds),this},clearLayers:function(){return this._map||(this._needsClustering=[],this._needsRemoving=[],delete this._gridClusters,delete this._gridUnclustered),this._noanimationUnspiderfy&&this._noanimationUnspiderfy(),this._featureGroup.clearLayers(),this._nonPointGroup.clearLayers(),this.eachLayer(function(e){e.off(this._childMarkerEventHandlers,this),delete e.__parent},this),this._map&&this._generateInitialClusters(),this},getBounds:function(){var e=new L.LatLngBounds;this._topClusterLevel&&e.extend(this._topClusterLevel._bounds);for(var t=this._needsClustering.length-1;t>=0;t--)e.extend(this._needsClustering[t].getLatLng());return e.extend(this._nonPointGroup.getBounds()),e},eachLayer:function(e,t){var i,n,r,s=this._needsClustering.slice(),o=this._needsRemoving;for(this._topClusterLevel&&this._topClusterLevel.getAllChildMarkers(s),n=s.length-1;n>=0;n--){for(i=!0,r=o.length-1;r>=0;r--)if(o[r].layer===s[n]){i=!1;break}i&&e.call(t,s[n])}this._nonPointGroup.eachLayer(e,t)},getLayers:function(){var e=[];return this.eachLayer(function(t){e.push(t)}),e},getLayer:function(e){var t=null;return e=parseInt(e,10),this.eachLayer(function(i){L.stamp(i)===e&&(t=i)}),t},hasLayer:function(e){if(!e)return!1;var t,i=this._needsClustering;for(t=i.length-1;t>=0;t--)if(i[t]===e)return!0;for(i=this._needsRemoving,t=i.length-1;t>=0;t--)if(i[t].layer===e)return!1;return!(!e.__parent||e.__parent._group!==this)||this._nonPointGroup.hasLayer(e)},zoomToShowLayer:function(e,t){"function"!=typeof t&&(t=function(){});var 
i=function(){!e._icon&&!e.__parent._icon||this._inZoomAnimation||(this._map.off("moveend",i,this),this.off("animationend",i,this),e._icon?t():e.__parent._icon&&(this.once("spiderfied",t,this),e.__parent.spiderfy()))};e._icon&&this._map.getBounds().contains(e.getLatLng())?t():e.__parent._zoom<Math.round(this._map._zoom)?(this._map.on("moveend",i,this),this._map.panTo(e.getLatLng())):(this._map.on("moveend",i,this),this.on("animationend",i,this),e.__parent.zoomToBounds())},onAdd:function(e){this._map=e;var t,i,n;if(!isFinite(this._map.getMaxZoom()))throw"Map has no maxZoom specified";for(this._featureGroup.addTo(e),this._nonPointGroup.addTo(e),this._gridClusters||this._generateInitialClusters(),this._maxLat=e.options.crs.projection.MAX_LATITUDE,t=0,i=this._needsRemoving.length;i>t;t++)n=this._needsRemoving[t],n.newlatlng=n.layer._latlng,n.layer._latlng=n.latlng;for(t=0,i=this._needsRemoving.length;i>t;t++)n=this._needsRemoving[t],this._removeLayer(n.layer,!0),n.layer._latlng=n.newlatlng;this._needsRemoving=[],this._zoom=Math.round(this._map._zoom),this._currentShownBounds=this._getExpandedVisibleBounds(),this._map.on("zoomend",this._zoomEnd,this),this._map.on("moveend",this._moveEnd,this),this._spiderfierOnAdd&&this._spiderfierOnAdd(),this._bindEvents(),i=this._needsClustering,this._needsClustering=[],this.addLayers(i,!0)},onRemove:function(e){e.off("zoomend",this._zoomEnd,this),e.off("moveend",this._moveEnd,this),this._unbindEvents(),this._map._mapPane.className=this._map._mapPane.className.replace(" leaflet-cluster-anim",""),this._spiderfierOnRemove&&this._spiderfierOnRemove(),delete this._maxLat,this._hideCoverage(),this._featureGroup.remove(),this._nonPointGroup.remove(),this._featureGroup.clearLayers(),this._map=null},getVisibleParent:function(e){for(var t=e;t&&!t._icon;)t=t.__parent;return t||null},_arraySplice:function(e,t){for(var i=e.length-1;i>=0;i--)if(e[i]===t)return e.splice(i,1),!0},_removeFromGridUnclustered:function(e,t){for(var i=this._map,n=this._gridUnclustered,r=Math.floor(this._map.getMinZoom());t>=r&&n[t].removeObject(e,i.project(e.getLatLng(),t));t--);},_childMarkerDragStart:function(e){e.target.__dragStart=e.target._latlng},_childMarkerMoved:function(e){if(!this._ignoreMove&&!e.target.__dragStart){var t=e.target._popup&&e.target._popup.isOpen();this._moveChild(e.target,e.oldLatLng,e.latlng),t&&e.target.openPopup()}},_moveChild:function(e,t,i){e._latlng=t,this.removeLayer(e),e._latlng=i,this.addLayer(e)},_childMarkerDragEnd:function(e){var t=e.target.__dragStart;delete e.target.__dragStart,t&&this._moveChild(e.target,t,e.target._latlng)},_removeLayer:function(e,t,i){var n=this._gridClusters,r=this._gridUnclustered,s=this._featureGroup,o=this._map,a=Math.floor(this._map.getMinZoom());t&&this._removeFromGridUnclustered(e,this._maxZoom);var h,l=e.__parent,u=l._markers;for(this._arraySplice(u,e);l&&(l._childCount--,l._boundsNeedUpdate=!0,!(l._zoom<a));)t&&l._childCount<=1?(h=l._markers[0]===e?l._markers[1]:l._markers[0],n[l._zoom].removeObject(l,o.project(l._cLatLng,l._zoom)),r[l._zoom].addObject(h,o.project(h.getLatLng(),l._zoom)),this._arraySplice(l.__parent._childClusters,l),l.__parent._markers.push(h),h.__parent=l.__parent,l._icon&&(s.removeLayer(l),i||s.addLayer(h))):l._iconNeedsUpdate=!0,l=l.__parent;delete e.__parent},_isOrIsParent:function(e,t){for(;t;){if(e===t)return!0;t=t.parentNode}return!1},fire:function(e,t,i){if(t&&t.layer instanceof 
L.MarkerCluster){if(t.originalEvent&&this._isOrIsParent(t.layer._icon,t.originalEvent.relatedTarget))return;e="cluster"+e}L.FeatureGroup.prototype.fire.call(this,e,t,i)},listens:function(e,t){return L.FeatureGroup.prototype.listens.call(this,e,t)||L.FeatureGroup.prototype.listens.call(this,"cluster"+e,t)},_defaultIconCreateFunction:function(e){var t=e.getChildCount(),i=" marker-cluster-";return i+=10>t?"small":100>t?"medium":"large",new L.DivIcon({html:"<div><span>"+t+"</span></div>",className:"marker-cluster"+i,iconSize:new L.Point(40,40)})},_bindEvents:function(){var e=this._map,t=this.options.spiderfyOnMaxZoom,i=this.options.showCoverageOnHover,n=this.options.zoomToBoundsOnClick;(t||n)&&this.on("clusterclick",this._zoomOrSpiderfy,this),i&&(this.on("clustermouseover",this._showCoverage,this),this.on("clustermouseout",this._hideCoverage,this),e.on("zoomend",this._hideCoverage,this))},_zoomOrSpiderfy:function(e){for(var t=e.layer,i=t;1===i._childClusters.length;)i=i._childClusters[0];i._zoom===this._maxZoom&&i._childCount===t._childCount&&this.options.spiderfyOnMaxZoom?t.spiderfy():this.options.zoomToBoundsOnClick&&t.zoomToBounds(),e.originalEvent&&13===e.originalEvent.keyCode&&this._map._container.focus()},_showCoverage:function(e){var t=this._map;this._inZoomAnimation||(this._shownPolygon&&t.removeLayer(this._shownPolygon),e.layer.getChildCount()>2&&e.layer!==this._spiderfied&&(this._shownPolygon=new L.Polygon(e.layer.getConvexHull(),this.options.polygonOptions),t.addLayer(this._shownPolygon)))},_hideCoverage:function(){this._shownPolygon&&(this._map.removeLayer(this._shownPolygon),this._shownPolygon=null)},_unbindEvents:function(){var e=this.options.spiderfyOnMaxZoom,t=this.options.showCoverageOnHover,i=this.options.zoomToBoundsOnClick,n=this._map;(e||i)&&this.off("clusterclick",this._zoomOrSpiderfy,this),t&&(this.off("clustermouseover",this._showCoverage,this),this.off("clustermouseout",this._hideCoverage,this),n.off("zoomend",this._hideCoverage,this))},_zoomEnd:function(){this._map&&(this._mergeSplitClusters(),this._zoom=Math.round(this._map._zoom),this._currentShownBounds=this._getExpandedVisibleBounds())},_moveEnd:function(){if(!this._inZoomAnimation){var e=this._getExpandedVisibleBounds();this._topClusterLevel._recursivelyRemoveChildrenFromMap(this._currentShownBounds,Math.floor(this._map.getMinZoom()),this._zoom,e),this._topClusterLevel._recursivelyAddChildrenToMap(null,Math.round(this._map._zoom),e),this._currentShownBounds=e}},_generateInitialClusters:function(){var e=Math.ceil(this._map.getMaxZoom()),t=Math.floor(this._map.getMinZoom()),i=this.options.maxClusterRadius,n=i;"function"!=typeof i&&(n=function(){return i}),null!==this.options.disableClusteringAtZoom&&(e=this.options.disableClusteringAtZoom-1),this._maxZoom=e,this._gridClusters={},this._gridUnclustered={};for(var r=e;r>=t;r--)this._gridClusters[r]=new L.DistanceGrid(n(r)),this._gridUnclustered[r]=new L.DistanceGrid(n(r));this._topClusterLevel=new this._markerCluster(this,t-1)},_addLayer:function(e,t){var i,n,r=this._gridClusters,s=this._gridUnclustered,o=Math.floor(this._map.getMinZoom());for(this.options.singleMarkerMode&&this._overrideMarkerIcon(e),e.on(this._childMarkerEventHandlers,this);t>=o;t--){i=this._map.project(e.getLatLng(),t);var a=r[t].getNearObject(i);if(a)return a._addChild(e),e.__parent=a,void 0;if(a=s[t].getNearObject(i)){var h=a.__parent;h&&this._removeLayer(a,!1);var l=new this._markerCluster(this,t,a,e);r[t].addObject(l,this._map.project(l._cLatLng,t)),a.__parent=l,e.__parent=l;var 
u=l;for(n=t-1;n>h._zoom;n--)u=new this._markerCluster(this,n,u),r[n].addObject(u,this._map.project(a.getLatLng(),n));return h._addChild(u),this._removeFromGridUnclustered(a,t),void 0}s[t].addObject(e,i)}this._topClusterLevel._addChild(e),e.__parent=this._topClusterLevel},_refreshClustersIcons:function(){this._featureGroup.eachLayer(function(e){e instanceof L.MarkerCluster&&e._iconNeedsUpdate&&e._updateIcon()})},_enqueue:function(e){this._queue.push(e),this._queueTimeout||(this._queueTimeout=setTimeout(L.bind(this._processQueue,this),300))},_processQueue:function(){for(var e=0;e<this._queue.length;e++)this._queue[e].call(this);this._queue.length=0,clearTimeout(this._queueTimeout),this._queueTimeout=null},_mergeSplitClusters:function(){var e=Math.round(this._map._zoom);this._processQueue(),this._zoom<e&&this._currentShownBounds.intersects(this._getExpandedVisibleBounds())?(this._animationStart(),this._topClusterLevel._recursivelyRemoveChildrenFromMap(this._currentShownBounds,Math.floor(this._map.getMinZoom()),this._zoom,this._getExpandedVisibleBounds()),this._animationZoomIn(this._zoom,e)):this._zoom>e?(this._animationStart(),this._animationZoomOut(this._zoom,e)):this._moveEnd()},_getExpandedVisibleBounds:function(){return this.options.removeOutsideVisibleBounds?L.Browser.mobile?this._checkBoundsMaxLat(this._map.getBounds()):this._checkBoundsMaxLat(this._map.getBounds().pad(1)):this._mapBoundsInfinite},_checkBoundsMaxLat:function(e){var t=this._maxLat;return void 0!==t&&(e.getNorth()>=t&&(e._northEast.lat=1/0),e.getSouth()<=-t&&(e._southWest.lat=-1/0)),e},_animationAddLayerNonAnimated:function(e,t){if(t===e)this._featureGroup.addLayer(e);else if(2===t._childCount){t._addToMap();var i=t.getAllChildMarkers();this._featureGroup.removeLayer(i[0]),this._featureGroup.removeLayer(i[1])}else t._updateIcon()},_extractNonGroupLayers:function(e,t){var i,n=e.getLayers(),r=0;for(t=t||[];r<n.length;r++)i=n[r],i instanceof L.LayerGroup?this._extractNonGroupLayers(i,t):t.push(i);return t},_overrideMarkerIcon:function(e){var t=e.options.icon=this.options.iconCreateFunction({getChildCount:function(){return 1},getAllChildMarkers:function(){return[e]}});return t}});L.MarkerClusterGroup.include({_mapBoundsInfinite:new L.LatLngBounds(new L.LatLng(-1/0,-1/0),new L.LatLng(1/0,1/0))}),L.MarkerClusterGroup.include({_noAnimation:{_animationStart:function(){},_animationZoomIn:function(e,t){this._topClusterLevel._recursivelyRemoveChildrenFromMap(this._currentShownBounds,Math.floor(this._map.getMinZoom()),e),this._topClusterLevel._recursivelyAddChildrenToMap(null,t,this._getExpandedVisibleBounds()),this.fire("animationend")},_animationZoomOut:function(e,t){this._topClusterLevel._recursivelyRemoveChildrenFromMap(this._currentShownBounds,Math.floor(this._map.getMinZoom()),e),this._topClusterLevel._recursivelyAddChildrenToMap(null,t,this._getExpandedVisibleBounds()),this.fire("animationend")},_animationAddLayer:function(e,t){this._animationAddLayerNonAnimated(e,t)}},_withAnimation:{_animationStart:function(){this._map._mapPane.className+=" leaflet-cluster-anim",this._inZoomAnimation++},_animationZoomIn:function(e,t){var i,n=this._getExpandedVisibleBounds(),r=this._featureGroup,s=Math.floor(this._map.getMinZoom());this._ignoreMove=!0,this._topClusterLevel._recursively(n,e,s,function(s){var 
o,a=s._latlng,h=s._markers;for(n.contains(a)||(a=null),s._isSingleParent()&&e+1===t?(r.removeLayer(s),s._recursivelyAddChildrenToMap(null,t,n)):(s.clusterHide(),s._recursivelyAddChildrenToMap(a,t,n)),i=h.length-1;i>=0;i--)o=h[i],n.contains(o._latlng)||r.removeLayer(o)}),this._forceLayout(),this._topClusterLevel._recursivelyBecomeVisible(n,t),r.eachLayer(function(e){e instanceof L.MarkerCluster||!e._icon||e.clusterShow()}),this._topClusterLevel._recursively(n,e,t,function(e){e._recursivelyRestoreChildPositions(t)}),this._ignoreMove=!1,this._enqueue(function(){this._topClusterLevel._recursively(n,e,s,function(e){r.removeLayer(e),e.clusterShow()}),this._animationEnd()})},_animationZoomOut:function(e,t){this._animationZoomOutSingle(this._topClusterLevel,e-1,t),this._topClusterLevel._recursivelyAddChildrenToMap(null,t,this._getExpandedVisibleBounds()),this._topClusterLevel._recursivelyRemoveChildrenFromMap(this._currentShownBounds,Math.floor(this._map.getMinZoom()),e,this._getExpandedVisibleBounds())},_animationAddLayer:function(e,t){var i=this,n=this._featureGroup;n.addLayer(e),t!==e&&(t._childCount>2?(t._updateIcon(),this._forceLayout(),this._animationStart(),e._setPos(this._map.latLngToLayerPoint(t.getLatLng())),e.clusterHide(),this._enqueue(function(){n.removeLayer(e),e.clusterShow(),i._animationEnd()})):(this._forceLayout(),i._animationStart(),i._animationZoomOutSingle(t,this._map.getMaxZoom(),this._zoom)))}},_animationZoomOutSingle:function(e,t,i){var n=this._getExpandedVisibleBounds(),r=Math.floor(this._map.getMinZoom());e._recursivelyAnimateChildrenInAndAddSelfToMap(n,r,t+1,i);var s=this;this._forceLayout(),e._recursivelyBecomeVisible(n,i),this._enqueue(function(){if(1===e._childCount){var o=e._markers[0];this._ignoreMove=!0,o.setLatLng(o.getLatLng()),this._ignoreMove=!1,o.clusterShow&&o.clusterShow()}else e._recursively(n,i,r,function(e){e._recursivelyRemoveChildrenFromMap(n,r,t+1)});s._animationEnd()})},_animationEnd:function(){this._map&&(this._map._mapPane.className=this._map._mapPane.className.replace(" leaflet-cluster-anim","")),this._inZoomAnimation--,this.fire("animationend")},_forceLayout:function(){L.Util.falseFn(document.body.offsetWidth)}}),L.markerClusterGroup=function(e){return new L.MarkerClusterGroup(e)};var i=L.MarkerCluster=L.Marker.extend({options:L.Icon.prototype.options,initialize:function(e,t,i,n){L.Marker.prototype.initialize.call(this,i?i._cLatLng||i.getLatLng():new L.LatLng(0,0),{icon:this,pane:e.options.clusterPane}),this._group=e,this._zoom=t,this._markers=[],this._childClusters=[],this._childCount=0,this._iconNeedsUpdate=!0,this._boundsNeedUpdate=!0,this._bounds=new L.LatLngBounds,i&&this._addChild(i),n&&this._addChild(n)},getAllChildMarkers:function(e,t){e=e||[];for(var i=this._childClusters.length-1;i>=0;i--)this._childClusters[i].getAllChildMarkers(e);for(var n=this._markers.length-1;n>=0;n--)t&&this._markers[n].__dragStart||e.push(this._markers[n]);return e},getChildCount:function(){return this._childCount},zoomToBounds:function(e){for(var t,i=this._childClusters.slice(),n=this._group._map,r=n.getBoundsZoom(this._bounds),s=this._zoom+1,o=n.getZoom();i.length>0&&r>s;){s++;var a=[];for(t=0;t<i.length;t++)a=a.concat(i[t]._childClusters);i=a}r>s?this._group._map.setView(this._latlng,s):o>=r?this._group._map.setView(this._latlng,o+1):this._group._map.fitBounds(this._bounds,e)},getBounds:function(){var e=new L.LatLngBounds;return 
e.extend(this._bounds),e},_updateIcon:function(){this._iconNeedsUpdate=!0,this._icon&&this.setIcon(this)},createIcon:function(){return this._iconNeedsUpdate&&(this._iconObj=this._group.options.iconCreateFunction(this),this._iconNeedsUpdate=!1),this._iconObj.createIcon()},createShadow:function(){return this._iconObj.createShadow()},_addChild:function(e,t){this._iconNeedsUpdate=!0,this._boundsNeedUpdate=!0,this._setClusterCenter(e),e instanceof L.MarkerCluster?(t||(this._childClusters.push(e),e.__parent=this),this._childCount+=e._childCount):(t||this._markers.push(e),this._childCount++),this.__parent&&this.__parent._addChild(e,!0)},_setClusterCenter:function(e){this._cLatLng||(this._cLatLng=e._cLatLng||e._latlng)},_resetBounds:function(){var e=this._bounds;e._southWest&&(e._southWest.lat=1/0,e._southWest.lng=1/0),e._northEast&&(e._northEast.lat=-1/0,e._northEast.lng=-1/0)},_recalculateBounds:function(){var e,t,i,n,r=this._markers,s=this._childClusters,o=0,a=0,h=this._childCount;if(0!==h){for(this._resetBounds(),e=0;e<r.length;e++)i=r[e]._latlng,this._bounds.extend(i),o+=i.lat,a+=i.lng;for(e=0;e<s.length;e++)t=s[e],t._boundsNeedUpdate&&t._recalculateBounds(),this._bounds.extend(t._bounds),i=t._wLatLng,n=t._childCount,o+=i.lat*n,a+=i.lng*n;this._latlng=this._wLatLng=new L.LatLng(o/h,a/h),this._boundsNeedUpdate=!1}},_addToMap:function(e){e&&(this._backupLatlng=this._latlng,this.setLatLng(e)),this._group._featureGroup.addLayer(this)},_recursivelyAnimateChildrenIn:function(e,t,i){this._recursively(e,this._group._map.getMinZoom(),i-1,function(e){var i,n,r=e._markers;for(i=r.length-1;i>=0;i--)n=r[i],n._icon&&(n._setPos(t),n.clusterHide())},function(e){var i,n,r=e._childClusters;for(i=r.length-1;i>=0;i--)n=r[i],n._icon&&(n._setPos(t),n.clusterHide())})},_recursivelyAnimateChildrenInAndAddSelfToMap:function(e,t,i,n){this._recursively(e,n,t,function(r){r._recursivelyAnimateChildrenIn(e,r._group._map.latLngToLayerPoint(r.getLatLng()).round(),i),r._isSingleParent()&&i-1===n?(r.clusterShow(),r._recursivelyRemoveChildrenFromMap(e,t,i)):r.clusterHide(),r._addToMap()})},_recursivelyBecomeVisible:function(e,t){this._recursively(e,this._group._map.getMinZoom(),t,null,function(e){e.clusterShow()})},_recursivelyAddChildrenToMap:function(e,t,i){this._recursively(i,this._group._map.getMinZoom()-1,t,function(n){if(t!==n._zoom)for(var r=n._markers.length-1;r>=0;r--){var s=n._markers[r];i.contains(s._latlng)&&(e&&(s._backupLatlng=s.getLatLng(),s.setLatLng(e),s.clusterHide&&s.clusterHide()),n._group._featureGroup.addLayer(s))}},function(t){t._addToMap(e)})},_recursivelyRestoreChildPositions:function(e){for(var t=this._markers.length-1;t>=0;t--){var i=this._markers[t];i._backupLatlng&&(i.setLatLng(i._backupLatlng),delete i._backupLatlng)}if(e-1===this._zoom)for(var n=this._childClusters.length-1;n>=0;n--)this._childClusters[n]._restorePosition();else for(var r=this._childClusters.length-1;r>=0;r--)this._childClusters[r]._recursivelyRestoreChildPositions(e)},_restorePosition:function(){this._backupLatlng&&(this.setLatLng(this._backupLatlng),delete this._backupLatlng)},_recursivelyRemoveChildrenFromMap:function(e,t,i,n){var 
r,s;this._recursively(e,t-1,i-1,function(e){for(s=e._markers.length-1;s>=0;s--)r=e._markers[s],n&&n.contains(r._latlng)||(e._group._featureGroup.removeLayer(r),r.clusterShow&&r.clusterShow())},function(e){for(s=e._childClusters.length-1;s>=0;s--)r=e._childClusters[s],n&&n.contains(r._latlng)||(e._group._featureGroup.removeLayer(r),r.clusterShow&&r.clusterShow())})},_recursively:function(e,t,i,n,r){var s,o,a=this._childClusters,h=this._zoom;if(h>=t&&(n&&n(this),r&&h===i&&r(this)),t>h||i>h)for(s=a.length-1;s>=0;s--)o=a[s],o._boundsNeedUpdate&&o._recalculateBounds(),e.intersects(o._bounds)&&o._recursively(e,t,i,n,r)},_isSingleParent:function(){return this._childClusters.length>0&&this._childClusters[0]._childCount===this._childCount}});L.Marker.include({clusterHide:function(){var e=this.options.opacity;return this.setOpacity(0),this.options.opacity=e,this},clusterShow:function(){return this.setOpacity(this.options.opacity)}}),L.DistanceGrid=function(e){this._cellSize=e,this._sqCellSize=e*e,this._grid={},this._objectPoint={}},L.DistanceGrid.prototype={addObject:function(e,t){var i=this._getCoord(t.x),n=this._getCoord(t.y),r=this._grid,s=r[n]=r[n]||{},o=s[i]=s[i]||[],a=L.Util.stamp(e);this._objectPoint[a]=t,o.push(e)},updateObject:function(e,t){this.removeObject(e),this.addObject(e,t)},removeObject:function(e,t){var i,n,r=this._getCoord(t.x),s=this._getCoord(t.y),o=this._grid,a=o[s]=o[s]||{},h=a[r]=a[r]||[];for(delete this._objectPoint[L.Util.stamp(e)],i=0,n=h.length;n>i;i++)if(h[i]===e)return h.splice(i,1),1===n&&delete a[r],!0},eachObject:function(e,t){var i,n,r,s,o,a,h,l=this._grid;for(i in l){o=l[i];for(n in o)for(a=o[n],r=0,s=a.length;s>r;r++)h=e.call(t,a[r]),h&&(r--,s--)}},getNearObject:function(e){var t,i,n,r,s,o,a,h,l=this._getCoord(e.x),u=this._getCoord(e.y),_=this._objectPoint,d=this._sqCellSize,c=null;for(t=u-1;u+1>=t;t++)if(r=this._grid[t])for(i=l-1;l+1>=i;i++)if(s=r[i])for(n=0,o=s.length;o>n;n++)a=s[n],h=this._sqDist(_[L.Util.stamp(a)],e),(d>h||d>=h&&null===c)&&(d=h,c=a);return c},_getCoord:function(e){var t=Math.floor(e/this._cellSize);return isFinite(t)?t:e},_sqDist:function(e,t){var i=t.x-e.x,n=t.y-e.y;return i*i+n*n}},function(){L.QuickHull={getDistant:function(e,t){var i=t[1].lat-t[0].lat,n=t[0].lng-t[1].lng;return n*(e.lat-t[0].lat)+i*(e.lng-t[0].lng)},findMostDistantPointFromBaseLine:function(e,t){var i,n,r,s=0,o=null,a=[];for(i=t.length-1;i>=0;i--)n=t[i],r=this.getDistant(n,e),r>0&&(a.push(n),r>s&&(s=r,o=n));return{maxPoint:o,newPoints:a}},buildConvexHull:function(e,t){var i=[],n=this.findMostDistantPointFromBaseLine(e,t);return n.maxPoint?(i=i.concat(this.buildConvexHull([e[0],n.maxPoint],n.newPoints)),i=i.concat(this.buildConvexHull([n.maxPoint,e[1]],n.newPoints))):[e[0]]},getConvexHull:function(e){var t,i=!1,n=!1,r=!1,s=!1,o=null,a=null,h=null,l=null,u=null,_=null;for(t=e.length-1;t>=0;t--){var d=e[t];(i===!1||d.lat>i)&&(o=d,i=d.lat),(n===!1||d.lat<n)&&(a=d,n=d.lat),(r===!1||d.lng>r)&&(h=d,r=d.lng),(s===!1||d.lng<s)&&(l=d,s=d.lng)}n!==i?(_=a,u=o):(_=l,u=h);var c=[].concat(this.buildConvexHull([_,u],e),this.buildConvexHull([u,_],e));return c}}}(),L.MarkerCluster.include({getConvexHull:function(){var e,t,i=this.getAllChildMarkers(),n=[];for(t=i.length-1;t>=0;t--)e=i[t].getLatLng(),n.push(e);return 
L.QuickHull.getConvexHull(n)}}),L.MarkerCluster.include({_2PI:2*Math.PI,_circleFootSeparation:25,_circleStartAngle:0,_spiralFootSeparation:28,_spiralLengthStart:11,_spiralLengthFactor:5,_circleSpiralSwitchover:9,spiderfy:function(){if(this._group._spiderfied!==this&&!this._group._inZoomAnimation){var e,t=this.getAllChildMarkers(null,!0),i=this._group,n=i._map,r=n.latLngToLayerPoint(this._latlng);this._group._unspiderfy(),this._group._spiderfied=this,t.length>=this._circleSpiralSwitchover?e=this._generatePointsSpiral(t.length,r):(r.y+=10,e=this._generatePointsCircle(t.length,r)),this._animationSpiderfy(t,e)}},unspiderfy:function(e){this._group._inZoomAnimation||(this._animationUnspiderfy(e),this._group._spiderfied=null)},_generatePointsCircle:function(e,t){var i,n,r=this._group.options.spiderfyDistanceMultiplier*this._circleFootSeparation*(2+e),s=r/this._2PI,o=this._2PI/e,a=[];for(s=Math.max(s,35),a.length=e,i=0;e>i;i++)n=this._circleStartAngle+i*o,a[i]=new L.Point(t.x+s*Math.cos(n),t.y+s*Math.sin(n))._round();return a},_generatePointsSpiral:function(e,t){var i,n=this._group.options.spiderfyDistanceMultiplier,r=n*this._spiralLengthStart,s=n*this._spiralFootSeparation,o=n*this._spiralLengthFactor*this._2PI,a=0,h=[];for(h.length=e,i=e;i>=0;i--)e>i&&(h[i]=new L.Point(t.x+r*Math.cos(a),t.y+r*Math.sin(a))._round()),a+=s/r+5e-4*i,r+=o/a;return h},_noanimationUnspiderfy:function(){var e,t,i=this._group,n=i._map,r=i._featureGroup,s=this.getAllChildMarkers(null,!0);for(i._ignoreMove=!0,this.setOpacity(1),t=s.length-1;t>=0;t--)e=s[t],r.removeLayer(e),e._preSpiderfyLatlng&&(e.setLatLng(e._preSpiderfyLatlng),delete e._preSpiderfyLatlng),e.setZIndexOffset&&e.setZIndexOffset(0),e._spiderLeg&&(n.removeLayer(e._spiderLeg),delete e._spiderLeg);i.fire("unspiderfied",{cluster:this,markers:s}),i._ignoreMove=!1,i._spiderfied=null}}),L.MarkerClusterNonAnimated=L.MarkerCluster.extend({_animationSpiderfy:function(e,t){var i,n,r,s,o=this._group,a=o._map,h=o._featureGroup,l=this._group.options.spiderLegPolylineOptions;for(o._ignoreMove=!0,i=0;i<e.length;i++)s=a.layerPointToLatLng(t[i]),n=e[i],r=new L.Polyline([this._latlng,s],l),a.addLayer(r),n._spiderLeg=r,n._preSpiderfyLatlng=n._latlng,n.setLatLng(s),n.setZIndexOffset&&n.setZIndexOffset(1e6),h.addLayer(n);this.setOpacity(.3),o._ignoreMove=!1,o.fire("spiderfied",{cluster:this,markers:e})},_animationUnspiderfy:function(){this._noanimationUnspiderfy()}}),L.MarkerCluster.include({_animationSpiderfy:function(e,t){var i,n,r,s,o,a,h=this,l=this._group,u=l._map,_=l._featureGroup,d=this._latlng,c=u.latLngToLayerPoint(d),p=L.Path.SVG,f=L.extend({},this._group.options.spiderLegPolylineOptions),m=f.opacity;for(void 0===m&&(m=L.MarkerClusterGroup.prototype.options.spiderLegPolylineOptions.opacity),p?(f.opacity=0,f.className=(f.className||"")+" leaflet-cluster-spider-leg"):f.opacity=m,l._ignoreMove=!0,i=0;i<e.length;i++)n=e[i],a=u.layerPointToLatLng(t[i]),r=new 
L.Polyline([d,a],f),u.addLayer(r),n._spiderLeg=r,p&&(s=r._path,o=s.getTotalLength()+.1,s.style.strokeDasharray=o,s.style.strokeDashoffset=o),n.setZIndexOffset&&n.setZIndexOffset(1e6),n.clusterHide&&n.clusterHide(),_.addLayer(n),n._setPos&&n._setPos(c);for(l._forceLayout(),l._animationStart(),i=e.length-1;i>=0;i--)a=u.layerPointToLatLng(t[i]),n=e[i],n._preSpiderfyLatlng=n._latlng,n.setLatLng(a),n.clusterShow&&n.clusterShow(),p&&(r=n._spiderLeg,s=r._path,s.style.strokeDashoffset=0,r.setStyle({opacity:m}));this.setOpacity(.3),l._ignoreMove=!1,setTimeout(function(){l._animationEnd(),l.fire("spiderfied",{cluster:h,markers:e})},200)},_animationUnspiderfy:function(e){var t,i,n,r,s,o,a=this,h=this._group,l=h._map,u=h._featureGroup,_=e?l._latLngToNewLayerPoint(this._latlng,e.zoom,e.center):l.latLngToLayerPoint(this._latlng),d=this.getAllChildMarkers(null,!0),c=L.Path.SVG;for(h._ignoreMove=!0,h._animationStart(),this.setOpacity(1),i=d.length-1;i>=0;i--)t=d[i],t._preSpiderfyLatlng&&(t.closePopup(),t.setLatLng(t._preSpiderfyLatlng),delete t._preSpiderfyLatlng,o=!0,t._setPos&&(t._setPos(_),o=!1),t.clusterHide&&(t.clusterHide(),o=!1),o&&u.removeLayer(t),c&&(n=t._spiderLeg,r=n._path,s=r.getTotalLength()+.1,r.style.strokeDashoffset=s,n.setStyle({opacity:0})));h._ignoreMove=!1,setTimeout(function(){var e=0;for(i=d.length-1;i>=0;i--)t=d[i],t._spiderLeg&&e++;for(i=d.length-1;i>=0;i--)t=d[i],t._spiderLeg&&(t.clusterShow&&t.clusterShow(),t.setZIndexOffset&&t.setZIndexOffset(0),e>1&&u.removeLayer(t),l.removeLayer(t._spiderLeg),delete t._spiderLeg);h._animationEnd(),h.fire("unspiderfied",{cluster:a,markers:d})},200)}}),L.MarkerClusterGroup.include({_spiderfied:null,unspiderfy:function(){this._unspiderfy.apply(this,arguments)},_spiderfierOnAdd:function(){this._map.on("click",this._unspiderfyWrapper,this),this._map.options.zoomAnimation&&this._map.on("zoomstart",this._unspiderfyZoomStart,this),this._map.on("zoomend",this._noanimationUnspiderfy,this),L.Browser.touch||this._map.getRenderer(this)},_spiderfierOnRemove:function(){this._map.off("click",this._unspiderfyWrapper,this),this._map.off("zoomstart",this._unspiderfyZoomStart,this),this._map.off("zoomanim",this._unspiderfyZoomAnim,this),this._map.off("zoomend",this._noanimationUnspiderfy,this),this._noanimationUnspiderfy()
},_unspiderfyZoomStart:function(){this._map&&this._map.on("zoomanim",this._unspiderfyZoomAnim,this)},_unspiderfyZoomAnim:function(e){L.DomUtil.hasClass(this._map._mapPane,"leaflet-touching")||(this._map.off("zoomanim",this._unspiderfyZoomAnim,this),this._unspiderfy(e))},_unspiderfyWrapper:function(){this._unspiderfy()},_unspiderfy:function(e){this._spiderfied&&this._spiderfied.unspiderfy(e)},_noanimationUnspiderfy:function(){this._spiderfied&&this._spiderfied._noanimationUnspiderfy()},_unspiderfyLayer:function(e){e._spiderLeg&&(this._featureGroup.removeLayer(e),e.clusterShow&&e.clusterShow(),e.setZIndexOffset&&e.setZIndexOffset(0),this._map.removeLayer(e._spiderLeg),delete e._spiderLeg)}}),L.MarkerClusterGroup.include({refreshClusters:function(e){return e?e instanceof L.MarkerClusterGroup?e=e._topClusterLevel.getAllChildMarkers():e instanceof L.LayerGroup?e=e._layers:e instanceof L.MarkerCluster?e=e.getAllChildMarkers():e instanceof L.Marker&&(e=[e]):e=this._topClusterLevel.getAllChildMarkers(),this._flagParentsIconsNeedUpdate(e),this._refreshClustersIcons(),this.options.singleMarkerMode&&this._refreshSingleMarkerModeMarkers(e),this},_flagParentsIconsNeedUpdate:function(e){var t,i;for(t in e)for(i=e[t].__parent;i;)i._iconNeedsUpdate=!0,i=i.__parent},_refreshSingleMarkerModeMarkers:function(e){var t,i;for(t in e)i=e[t],this.hasLayer(i)&&i.setIcon(this._overrideMarkerIcon(i))}}),L.Marker.include({refreshIconOptions:function(e,t){var i=this.options.icon;return L.setOptions(i,e),this.setIcon(i),t&&this.__parent&&this.__parent._group.refreshClusters(this),this}}),e.MarkerClusterGroup=t,e.MarkerCluster=i});
//# sourceMappingURL=leaflet.markercluster.js.map
# /Chaturanga-0.1.8.linux-x86_64.tar.gz/usr/local/lib/python2.7/dist-packages/chaturanga/check.py
def next_point(ref, points, axis, positive):
"""
Returns next_point in points w.r.t. ref on given axis and direction.
{int: axis} = {0: row, 1: column, 2: anti-diagonal, 3: diagonal}
"""
if axis == 0:
line = list(filter(lambda p: p[0] == ref[0], points))
if positive:
line = list(filter(lambda p: p[1] > ref[1], line))
if line != []:
return min(line, key=lambda p: p[1])
else:
line = list(filter(lambda p: p[1] < ref[1], line))
if line != []:
return max(line, key=lambda p: p[1])
if axis == 1:
line = list(filter(lambda p: p[1] == ref[1], points))
if axis == 2:
line = list(filter(lambda p: p[0] + p[1] == ref[0] + ref[1], points))
if axis == 3:
line = list(filter(lambda p: p[0] - p[1] == ref[0] - ref[1], points))
if positive:
line = list(filter(lambda p: p[0] > ref[0], line))
else:
line = list(filter(lambda p: p[0] < ref[0], line))
if line != []:
if positive:
return min(line, key=lambda p: p[0])
return max(line, key=lambda p: p[0])
return None
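# Hedged sketch (not part of the original module): a couple of worked calls.
#   points = {(0, 0), (0, 3), (2, 2), (4, 0)}
#   next_point((0, 0), points, 0, True)  -> (0, 3)   # same row, nearest to the "right"
#   next_point((0, 0), points, 3, True)  -> (2, 2)   # same diagonal, increasing row
#   next_point((0, 0), points, 1, False) -> None     # same column, nothing in that direction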
def flip(board):
"""Returns horizontal mirror image of board with inverted colors."""
flipped_board = dict()
for square, piece in board.items():
flipped_board[(7 - square[0], square[1])] = piece.swapcase()
return flipped_board
def is_check(board):
"""Returns True if White in Check, False otherwise."""
pieces = board.keys()
enemy_knights = []
enemy_pawns = []
for square, piece in board.items():
if piece == 'K':
king = square
if piece == 'k':
enemy_king = square
if piece == 'n':
enemy_knights.append(square)
if piece == 'p':
enemy_pawns.append(square)
# check for attack by enemy bishops, rooks, and queens
for axis in range(4):
for positive in [True, False]:
square = next_point(king, pieces, axis, positive)
if square != None:
if (axis in [0, 1]) and (board[square] in 'qr'):
return True
if (axis in [2, 3]) and (board[square] in 'qb'):
return True
# check for attack by enemy knights
for knight in enemy_knights:
if (king[0] - knight[0])**2 + (king[1] - knight[1])**2 == 5:
return True
# check for attack by enemy pawns
for pawn in enemy_pawns:
if (king[0] - pawn[0] == 1) and (abs(king[1] - pawn[1]) == 1):
return True
# check for attack by enemy king
if (king[0] - enemy_king[0])**2 + (king[1] - enemy_king[1])**2 < 3:
return True
    return False
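# Hedged sketch (not part of the original module): minimal boards exercising is_check().
#   is_check({(0, 4): 'K', (7, 4): 'k'})              -> False  (no attacker)
#   is_check({(0, 4): 'K', (7, 4): 'k', (2, 5): 'n'}) -> True   (knight a (2,1)-jump away)
#   is_check({(0, 4): 'K', (7, 4): 'k', (0, 7): 'r'}) -> True   (rook on the same row)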
// /Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/cy.js
/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['cy']={"editor":"Golygydd Testun Cyfoethog","editorPanel":"Panel Golygydd Testun Cyfoethog","common":{"editorHelp":"Gwasgwch ALT 0 am gymorth","browseServer":"Pori'r Gweinydd","url":"URL","protocol":"Protocol","upload":"Lanlwytho","uploadSubmit":"Anfon i'r Gweinydd","image":"Delwedd","flash":"Flash","form":"Ffurflen","checkbox":"Blwch ticio","radio":"Botwm Radio","textField":"Maes Testun","textarea":"Ardal Testun","hiddenField":"Maes Cudd","button":"Botwm","select":"Maes Dewis","imageButton":"Botwm Delwedd","notSet":"<heb osod>","id":"Id","name":"Name","langDir":"Cyfeiriad Iaith","langDirLtr":"Chwith i'r Dde (LTR)","langDirRtl":"Dde i'r Chwith (RTL)","langCode":"Cod Iaith","longDescr":"URL Disgrifiad Hir","cssClass":"Dosbarthiadau Dalen Arddull","advisoryTitle":"Teitl Cynghorol","cssStyle":"Arddull","ok":"Iawn","cancel":"Diddymu","close":"Cau","preview":"Rhagolwg","resize":"Ailfeintio","generalTab":"Cyffredinol","advancedTab":"Uwch","validateNumberFailed":"'Dyw'r gwerth hwn ddim yn rhif.","confirmNewPage":"Byddwch chi'n colli unrhyw newidiadau i'r cynnwys sydd heb eu cadw. Ydych am barhau i lwytho tudalen newydd?","confirmCancel":"Cafodd rhai o'r opsiynau eu newid. Ydych chi wir am gau'r deialog?","options":"Opsiynau","target":"Targed","targetNew":"Ffenest Newydd (_blank)","targetTop":"Ffenest ar y Brig (_top)","targetSelf":"Yr un Ffenest (_self)","targetParent":"Ffenest y Rhiant (_parent)","langDirLTR":"Chwith i'r Dde (LTR)","langDirRTL":"Dde i'r Chwith (RTL)","styles":"Arddull","cssClasses":"Dosbarthiadau Dalen Arddull","width":"Lled","height":"Uchder","align":"Alinio","left":"Chwith","right":"Dde","center":"Canol","justify":"Unioni","alignLeft":"Alinio i'r Chwith","alignRight":"Alinio i'r Dde","alignCenter":"Align Center","alignTop":"Brig","alignMiddle":"Canol","alignBottom":"Gwaelod","alignNone":"None","invalidValue":"Gwerth annilys.","invalidHeight":"Mae'n rhaid i'r uchder fod yn rhif.","invalidWidth":"Mae'n rhaid i'r lled fod yn rhif.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Mae'n rhaid i'r gwerth ar gyfer maes \"%1\" fod yn rhif positif gyda neu heb uned fesuriad CSS dilys (px, %, in, cm, mm, em, ex, pt, neu pc).","invalidHtmlLength":"Mae'n rhaid i'r gwerth ar gyfer maes \"%1\" fod yn rhif positif gyda neu heb uned fesuriad HTML dilys (px neu %).","invalidInlineStyle":"Mae'n rhaid i'r gwerth ar gyfer arddull mewn-llinell gynnwys un set neu fwy ar y fformat \"enw : gwerth\", wedi'u gwahanu gyda hanner colon.","cssLengthTooltip":"Rhowch rif am werth mewn picsel neu rhif gydag uned CSS dilys (px, %, in, cm, mm, em, pt neu pc).","unavailable":"%1<span class=\"cke_accessibility\">, ddim ar gael</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"},"about":{"copy":"Hawlfraint © $1. 
Cedwir pob hawl.","dlgTitle":"About CKEditor 4","moreInfo":"Am wybodaeth ynghylch trwyddedau, ewch i'n gwefan:"},"basicstyles":{"bold":"Bras","italic":"Italig","strike":"Llinell Trwyddo","subscript":"Is-sgript","superscript":"Uwchsgript","underline":"Tanlinellu"},"bidi":{"ltr":"Cyfeiriad testun o'r chwith i'r dde","rtl":"Cyfeiriad testun o'r dde i'r chwith"},"blockquote":{"toolbar":"Dyfyniad bloc"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Cyfangu'r Bar Offer","toolbarExpand":"Ehangu'r Bar Offer","toolbarGroups":{"document":"Dogfen","clipboard":"Clipfwrdd/Dadwneud","editing":"Golygu","forms":"Ffurflenni","basicstyles":"Arddulliau Sylfaenol","paragraph":"Paragraff","links":"Dolenni","insert":"Mewnosod","styles":"Arddulliau","colors":"Lliwiau","tools":"Offer"},"toolbars":"Bariau offer y golygydd"},"clipboard":{"copy":"Copïo","copyError":"'Dyw gosodiadau diogelwch eich porwr ddim yn caniatàu'r golygydd i gynnal 'gweithredoedd copïo' yn awtomatig. Defnyddiwch y bysellfwrdd (Ctrl/Cmd+C).","cut":"Torri","cutError":"Nid yw gosodiadau diogelwch eich porwr yn caniatàu'r golygydd i gynnal 'gweithredoedd torri' yn awtomatig. Defnyddiwch y bysellfwrdd (Ctrl/Cmd+X).","paste":"Gludo","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Ardal Gludo","pasteMsg":"Paste your content inside the area below and press OK."},"colorbutton":{"auto":"Awtomatig","bgColorTitle":"Lliw Cefndir","colors":{"000":"Du","800000":"Marwn","8B4513":"Brown Cyfrwy","2F4F4F":"Llechen Tywyll","008080":"Corhwyad","000080":"Nefi","4B0082":"Indigo","696969":"Llwyd Tywyll","B22222":"Bric Tân","A52A2A":"Brown","DAA520":"Rhoden Aur","006400":"Gwyrdd Tywyll","40E0D0":"Gwyrddlas","0000CD":"Glas Canolig","800080":"Porffor","808080":"Llwyd","F00":"Coch","FF8C00":"Oren Tywyll","FFD700":"Aur","008000":"Gwyrdd","0FF":"Cyan","00F":"Glas","EE82EE":"Fioled","A9A9A9":"Llwyd Pwl","FFA07A":"Samwn Golau","FFA500":"Oren","FFFF00":"Melyn","00FF00":"Leim","AFEEEE":"Gwyrddlas Golau","ADD8E6":"Glas Golau","DDA0DD":"Eirinen","D3D3D3":"Llwyd Golau","FFF0F5":"Gwrid Lafant","FAEBD7":"Gwyn Hynafol","FFFFE0":"Melyn Golau","F0FFF0":"Melwn Gwyrdd Golau","F0FFFF":"Aswr","F0F8FF":"Glas Alys","E6E6FA":"Lafant","FFF":"Gwyn","1ABC9C":"Strong Cyan","2ECC71":"Emerald","3498DB":"Bright Blue","9B59B6":"Amethyst","4E5F70":"Grayish Blue","F1C40F":"Vivid Yellow","16A085":"Dark Cyan","27AE60":"Dark Emerald","2980B9":"Strong Blue","8E44AD":"Dark Violet","2C3E50":"Desaturated Blue","F39C12":"Orange","E67E22":"Carrot","E74C3C":"Pale Red","ECF0F1":"Bright Silver","95A5A6":"Light Grayish Cyan","DDD":"Light Gray","D35400":"Pumpkin","C0392B":"Strong Red","BDC3C7":"Silver","7F8C8D":"Grayish Cyan","999":"Dark Gray"},"more":"Mwy o Liwiau...","panelTitle":"Lliwiau","textColorTitle":"Lliw Testun"},"colordialog":{"clear":"Clirio","highlight":"Uwcholeuo","options":"Opsiynau Lliw","selected":"Lliw a Ddewiswyd","title":"Dewis lliw"},"templates":{"button":"Templedi","emptyListMsg":"(Dim templedi wedi'u diffinio)","insertOption":"Amnewid y cynnwys go iawn","options":"Opsiynau Templedi","selectPromptMsg":"Dewiswch dempled i'w agor yn y golygydd","title":"Templedi Cynnwys"},"contextmenu":{"options":"Opsiynau Dewislen Cyd-destun"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. 
You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Teitl Cynghorol","cssClassInputLabel":"Dosbarthiadau Ffeil Arddull","edit":"Golygu Div","inlineStyleInputLabel":"Arddull Mewn Llinell","langDirLTRLabel":"Chwith i'r Dde (LTR)","langDirLabel":"Cyfeiriad yr Iaith","langDirRTLLabel":"Dde i'r Chwith (RTL)","languageCodeInputLabel":" Cod Iaith","remove":"Tynnu Div","styleSelectLabel":"Arddull","title":"Creu Cynhwysydd Div","toolbar":"Creu Cynhwysydd Div"},"elementspath":{"eleLabel":"Llwybr elfennau","eleTitle":"Elfen %1"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"find":{"find":"Chwilio","findOptions":"Opsiynau Chwilio","findWhat":"Chwilio'r term:","matchCase":"Cydweddu'r cas","matchCyclic":"Cydweddu'n gylchol","matchWord":"Cydweddu gair cyfan","notFoundMsg":"Nid oedd y testun wedi'i ddarganfod.","replace":"Amnewid Un","replaceAll":"Amnewid Pob","replaceSuccessMsg":"Amnewidiwyd %1 achlysur.","replaceWith":"Amnewid gyda:","title":"Chwilio ac Amnewid"},"fakeobjects":{"anchor":"Angor","flash":"Animeiddiant Flash","hiddenfield":"Maes Cudd","iframe":"IFrame","unknown":"Gwrthrych Anhysbys"},"flash":{"access":"Mynediad Sgript","accessAlways":"Pob amser","accessNever":"Byth","accessSameDomain":"R'un parth","alignAbsBottom":"Gwaelod Abs","alignAbsMiddle":"Canol Abs","alignBaseline":"Baslinell","alignTextTop":"Testun Top","bgcolor":"Lliw cefndir","chkFull":"Caniatàu Sgrin Llawn","chkLoop":"Lwpio","chkMenu":"Galluogi Dewislen Flash","chkPlay":"AwtoChwarae","flashvars":"Newidynnau ar gyfer Flash","hSpace":"BwlchLl","properties":"Priodweddau Flash","propertiesTab":"Priodweddau","quality":"Ansawdd","qualityAutoHigh":"Uchel Awto","qualityAutoLow":"Isel Awto","qualityBest":"Gorau","qualityHigh":"Uchel","qualityLow":"Isel","qualityMedium":"Canolig","scale":"Graddfa","scaleAll":"Dangos pob","scaleFit":"Ffit Union","scaleNoBorder":"Dim Ymyl","title":"Priodweddau Flash","vSpace":"BwlchF","validateHSpace":"Rhaid i'r BwlchLl fod yn rhif.","validateSrc":"Ni all yr URL fod yn wag.","validateVSpace":"Rhaid i'r BwlchF fod yn rhif.","windowMode":"Modd ffenestr","windowModeOpaque":"Afloyw","windowModeTransparent":"Tryloyw","windowModeWindow":"Ffenestr"},"font":{"fontSize":{"label":"Maint","voiceLabel":"Maint y Ffont","panelTitle":"Maint y Ffont"},"label":"Ffont","panelTitle":"Enw'r Ffont","voiceLabel":"Ffont"},"forms":{"button":{"title":"Priodweddau Botymau","text":"Testun (Gwerth)","type":"Math","typeBtn":"Botwm","typeSbm":"Anfon","typeRst":"Ailosod"},"checkboxAndRadio":{"checkboxTitle":"Priodweddau Blwch Ticio","radioTitle":"Priodweddau Botwm Radio","value":"Gwerth","selected":"Dewiswyd","required":"Required"},"form":{"title":"Priodweddau Ffurflen","menu":"Priodweddau Ffurflen","action":"Gweithred","method":"Dull","encoding":"Amgodio"},"hidden":{"title":"Priodweddau Maes Cudd","name":"Enw","value":"Gwerth"},"select":{"title":"Priodweddau Maes Dewis","selectInfo":"Gwyb Dewis","opAvail":"Opsiynau ar Gael","value":"Gwerth","size":"Maint","lines":"llinellau","chkMulti":"Caniatàu 
aml-ddewisiadau","required":"Required","opText":"Testun","opValue":"Gwerth","btnAdd":"Ychwanegu","btnModify":"Newid","btnUp":"Lan","btnDown":"Lawr","btnSetValue":"Gosod fel gwerth a ddewiswyd","btnDelete":"Dileu"},"textarea":{"title":"Priodweddau Ardal Testun","cols":"Colofnau","rows":"Rhesi"},"textfield":{"title":"Priodweddau Maes Testun","name":"Enw","value":"Gwerth","charWidth":"Lled Nod","maxChars":"Uchafswm y Nodau","required":"Required","type":"Math","typeText":"Testun","typePass":"Cyfrinair","typeEmail":"Ebost","typeSearch":"Chwilio","typeTel":"Rhif Ffôn","typeUrl":"URL"}},"format":{"label":"Fformat","panelTitle":"Fformat Paragraff","tag_address":"Cyfeiriad","tag_div":"Normal (DIV)","tag_h1":"Pennawd 1","tag_h2":"Pennawd 2","tag_h3":"Pennawd 3","tag_h4":"Pennawd 4","tag_h5":"Pennawd 5","tag_h6":"Pennawd 6","tag_p":"Normal","tag_pre":"Wedi'i Fformatio"},"horizontalrule":{"toolbar":"Mewnosod Llinell Lorweddol"},"iframe":{"border":"Dangos ymyl y ffrâm","noUrl":"Rhowch URL yr iframe","scrolling":"Galluogi bariau sgrolio","title":"Priodweddau IFrame","toolbar":"IFrame"},"image":{"alt":"Testun Amgen","border":"Ymyl","btnUpload":"Anfon i'r Gweinydd","button2Img":"Ydych am drawsffurfio'r botwm ddelwedd hwn ar ddelwedd syml?","hSpace":"BwlchLl","img2Button":"Ydych am drawsffurfio'r ddelwedd hon ar fotwm delwedd?","infoTab":"Gwyb Delwedd","linkTab":"Dolen","lockRatio":"Cloi Cymhareb","menu":"Priodweddau Delwedd","resetSize":"Ailosod Maint","title":"Priodweddau Delwedd","titleButton":"Priodweddau Botwm Delwedd","upload":"Lanlwytho","urlMissing":"URL gwreiddiol y ddelwedd ar goll.","vSpace":"BwlchF","validateBorder":"Rhaid i'r ymyl fod yn gyfanrif.","validateHSpace":"Rhaid i'r HSpace fod yn gyfanrif.","validateVSpace":"Rhaid i'r VSpace fod yn gyfanrif."},"indent":{"indent":"Cynyddu'r Mewnoliad","outdent":"Lleihau'r Mewnoliad"},"smiley":{"options":"Opsiynau Gwenogluniau","title":"Mewnosod Gwenoglun","toolbar":"Gwenoglun"},"language":{"button":"Gosod iaith","remove":"Tynnu iaith"},"link":{"acccessKey":"Allwedd Mynediad","advanced":"Uwch","advisoryContentType":"Math y Cynnwys Cynghorol","advisoryTitle":"Teitl Cynghorol","anchor":{"toolbar":"Angor","menu":"Golygu'r Angor","title":"Priodweddau'r Angor","name":"Enw'r Angor","errorName":"Teipiwch enw'r angor","remove":"Tynnwch yr Angor"},"anchorId":"Gan Id yr Elfen","anchorName":"Gan Enw'r Angor","charset":"Set Nodau'r Adnodd Cysylltiedig","cssClasses":"Dosbarthiadau Dalen Arddull","download":"Force Download","displayText":"Display Text","emailAddress":"Cyfeiriad E-Bost","emailBody":"Corff y Neges","emailSubject":"Testun y Neges","id":"Id","info":"Gwyb y Ddolen","langCode":"Cod Iaith","langDir":"Cyfeiriad Iaith","langDirLTR":"Chwith i'r Dde (LTR)","langDirRTL":"Dde i'r Chwith (RTL)","menu":"Golygu Dolen","name":"Enw","noAnchors":"(Dim angorau ar gael yn y ddogfen)","noEmail":"Teipiwch gyfeiriad yr e-bost","noUrl":"Teipiwch URL y ddolen","noTel":"Please type the phone number","other":"<eraill>","phoneNumber":"Phone number","popupDependent":"Dibynnol (Netscape)","popupFeatures":"Nodweddion Ffenestr Bop","popupFullScreen":"Sgrin Llawn (IE)","popupLeft":"Safle Chwith","popupLocationBar":"Bar Safle","popupMenuBar":"Dewislen","popupResizable":"Ailfeintiol","popupScrollBars":"Barrau Sgrolio","popupStatusBar":"Bar Statws","popupToolbar":"Bar Offer","popupTop":"Safle Top","rel":"Perthynas","selectAnchor":"Dewiswch Angor","styles":"Arddull","tabIndex":"Indecs Tab","target":"Targed","targetFrame":"<ffrâm>","targetFrameName":"Enw Ffrâm y 
Targed","targetPopup":"<ffenestr bop>","targetPopupName":"Enw Ffenestr Bop","title":"Dolen","toAnchor":"Dolen at angor yn y testun","toEmail":"E-bost","toUrl":"URL","toPhone":"Phone","toolbar":"Dolen","type":"Math y Ddolen","unlink":"Datgysylltu","upload":"Lanlwytho"},"list":{"bulletedlist":"Mewnosod/Tynnu Rhestr Bwled","numberedlist":"Mewnosod/Tynnu Rhestr Rhifol"},"liststyle":{"bulletedTitle":"Priodweddau Rhestr Fwled","circle":"Cylch","decimal":"Degol (1, 2, 3, ayyb.)","disc":"Disg","lowerAlpha":"Alffa Is (a, b, c, d, e, ayyb.)","lowerRoman":"Rhufeinig Is (i, ii, iii, iv, v, ayyb.)","none":"Dim","notset":"<heb osod>","numberedTitle":"Priodweddau Rhestr Rifol","square":"Sgwâr","start":"Dechrau","type":"Math","upperAlpha":"Alffa Uwch (A, B, C, D, E, ayyb.)","upperRoman":"Rhufeinig Uwch (I, II, III, IV, V, ayyb.)","validateStartNumber":"Rhaid bod y rhif cychwynnol yn gyfanrif."},"magicline":{"title":"Mewnosod paragraff yma"},"maximize":{"maximize":"Mwyhau","minimize":"Lleihau"},"newpage":{"toolbar":"Tudalen Newydd"},"pagebreak":{"alt":"Toriad Tudalen","toolbar":"Mewnosod Toriad Tudalen i Argraffu"},"pastetext":{"button":"Gludo fel testun plaen","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Gludo fel Testun Plaen"},"pastefromword":{"confirmCleanup":"Mae'r testun rydych chi am ludo wedi'i gopïo o Word. Ydych chi am ei lanhau cyn ei ludo?","error":"Doedd dim modd glanhau y data a ludwyd oherwydd gwall mewnol","title":"Gludo o Word","toolbar":"Gludo o Word"},"preview":{"preview":"Rhagolwg"},"print":{"toolbar":"Argraffu"},"removeformat":{"toolbar":"Tynnu Fformat"},"save":{"toolbar":"Cadw"},"selectall":{"toolbar":"Dewis Popeth"},"showblocks":{"toolbar":"Dangos Blociau"},"sourcearea":{"toolbar":"HTML"},"specialchar":{"options":"Opsiynau Nodau Arbennig","title":"Dewis Nod Arbennig","toolbar":"Mewnosod Nod Arbennig"},"scayt":{"btn_about":"Ynghylch SCAYT","btn_dictionaries":"Geiriaduron","btn_disable":"Analluogi SCAYT","btn_enable":"Galluogi SCAYT","btn_langs":"Ieithoedd","btn_options":"Opsiynau","text_title":"Gwirio'r Sillafu Wrth Deipio"},"stylescombo":{"label":"Arddulliau","panelTitle":"Arddulliau Fformatio","panelTitle1":"Arddulliau Bloc","panelTitle2":"Arddulliau Mewnol","panelTitle3":"Arddulliau Gwrthrych"},"table":{"border":"Maint yr Ymyl","caption":"Pennawd","cell":{"menu":"Cell","insertBefore":"Mewnosod Cell Cyn","insertAfter":"Mewnosod Cell Ar Ôl","deleteCell":"Dileu Celloedd","merge":"Cyfuno Celloedd","mergeRight":"Cyfuno i'r Dde","mergeDown":"Cyfuno i Lawr","splitHorizontal":"Hollti'r Gell yn Lorweddol","splitVertical":"Hollti'r Gell yn Fertigol","title":"Priodweddau'r Gell","cellType":"Math y Gell","rowSpan":"Rhychwant Rhesi","colSpan":"Rhychwant Colofnau","wordWrap":"Lapio Geiriau","hAlign":"Aliniad Llorweddol","vAlign":"Aliniad Fertigol","alignBaseline":"Baslinell","bgColor":"Lliw Cefndir","borderColor":"Lliw Ymyl","data":"Data","header":"Pennyn","yes":"Ie","no":"Na","invalidWidth":"Mae'n rhaid i led y gell fod yn rhif.","invalidHeight":"Mae'n rhaid i uchder y gell fod yn rhif.","invalidRowSpan":"Mae'n rhaid i rychwant y rhesi fod yn gyfanrif.","invalidColSpan":"Mae'n rhaid i rychwant y colofnau fod yn gyfanrif.","chooseColor":"Dewis"},"cellPad":"Padio'r gell","cellSpace":"Bylchiad y gell","column":{"menu":"Colofn","insertBefore":"Mewnosod Colofn Cyn","insertAfter":"Mewnosod Colofn Ar Ôl","deleteColumn":"Dileu Colofnau"},"columns":"Colofnau","deleteTable":"Dileu 
Tabl","headers":"Penynnau","headersBoth":"Y Ddau","headersColumn":"Colofn gyntaf","headersNone":"Dim","headersRow":"Rhes gyntaf","heightUnit":"height unit","invalidBorder":"Mae'n rhaid i faint yr ymyl fod yn rhif.","invalidCellPadding":"Mae'n rhaid i badiad y gell fod yn rhif positif.","invalidCellSpacing":"Mae'n rhaid i fylchiad y gell fod yn rhif positif.","invalidCols":"Mae'n rhaid cael o leiaf un golofn.","invalidHeight":"Mae'n rhaid i uchder y tabl fod yn rhif.","invalidRows":"Mae'n rhaid cael o leiaf un rhes.","invalidWidth":"Mae'n rhaid i led y tabl fod yn rhif.","menu":"Priodweddau'r Tabl","row":{"menu":"Rhes","insertBefore":"Mewnosod Rhes Cyn","insertAfter":"Mewnosod Rhes Ar Ôl","deleteRow":"Dileu Rhesi"},"rows":"Rhesi","summary":"Crynodeb","title":"Priodweddau'r Tabl","toolbar":"Tabl","widthPc":"y cant","widthPx":"picsel","widthUnit":"uned lled"},"undo":{"redo":"Ailwneud","undo":"Dadwneud"},"widget":{"move":"Clcio a llusgo i symud","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the user.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} done ({percentage}%)..."},"wsc":{"btnIgnore":"Anwybyddu Un","btnIgnoreAll":"Anwybyddu Pob","btnReplace":"Amnewid Un","btnReplaceAll":"Amnewid Pob","btnUndo":"Dadwneud","changeTo":"Newid i","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Gwirydd sillafu heb ei arsefydlu. A ydych am ei lawrlwytho nawr?","manyChanges":"Gwirio sillafu wedi gorffen: Newidiwyd %1 gair","noChanges":"Gwirio sillafu wedi gorffen: Dim newidiadau","noMispell":"Gwirio sillafu wedi gorffen: Dim camsillaf.","noSuggestions":"- Dim awgrymiadau -","notAvailable":"Nid yw'r gwasanaeth hwn ar gael yn bresennol.","notInDic":"Nid i'w gael yn y geiriadur","oneChange":"Gwirio sillafu wedi gorffen: Newidiwyd 1 gair","progress":"Gwirio sillafu yn ar y gweill...","title":"Gwirio Sillafu","toolbar":"Gwirio Sillafu"}}; | PypiClean |
/FoBiS.py-3.0.5.tar.gz/FoBiS.py-3.0.5/fobis/Fobos.py
# Copyright (C) 2015 Stefano Zaghi
#
# This file is part of FoBiS.py.
#
# FoBiS.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FoBiS.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FoBiS.py. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from builtins import object
try:
  import ConfigParser as configparser  # Python 2
except ImportError:
  import configparser  # Python 3
from copy import deepcopy
import os
import re
import sys
from .utils import check_results, print_fake, syswork
class Fobos(object):
"""
Fobos is an object that handles fobos file, its attributes and methods.
"""
def __init__(self, cliargs, print_n=None, print_w=None):
"""
Parameters
----------
cliargs : argparse object
print_n : {None}
function for printing normal message
print_w : {None}
function for printing emphized warning message
"""
if print_n is None:
self.print_n = print_fake
else:
self.print_n = print_n
if print_w is None:
self.print_w = print_fake
else:
self.print_w = print_w
self.fobos = None
self.mode = None
self.local_variables = {}
if cliargs.fobos:
filename = cliargs.fobos
else:
filename = 'fobos'
if os.path.exists(filename):
self.fobos = configparser.RawConfigParser()
if not cliargs.fobos_case_insensitive:
self.fobos.optionxform = str # case sensitive
self.fobos.read(filename)
self._set_cliargs(cliargs=cliargs)
return
def _check_mode(self, mode):
"""
Function for checking the presence of the selected mode into the set defined inside the fobos.
Parameters
----------
mode : str
name of the selcted mode
"""
if self.fobos:
if self.fobos.has_option('modes', 'modes'):
if mode in self.fobos.get('modes', 'modes'):
self.mode = mode
else:
self.print_w('Error: the mode "' + mode + '" is not defined into the fobos file.')
self.modes_list()
sys.exit(1)
else:
self.print_w('Error: fobos file has not "modes" section.')
sys.exit(1)
return
def _set_mode(self, mode=None):
"""
Function for setting the selected mode.
Parameters
----------
mode : {None}
selected mode
"""
if self.fobos:
if mode:
self._check_mode(mode=mode)
else:
if self.fobos.has_option('modes', 'modes'):
self.mode = self.fobos.get('modes', 'modes').split()[0] # first mode selected
else:
if self.fobos.has_section('default'):
self.mode = 'default'
else:
self.print_w('Warning: fobos file has not "modes" section neither "default" one')
return
def _check_template(self):
"""
Function for checking the correct use of "template" sections.
"""
if self.fobos:
for mode in self.fobos.sections():
if self.fobos.has_option(mode, 'template'):
if self.fobos.has_section(self.fobos.get(mode, 'template')):
for item in self.fobos.items(self.fobos.get(mode, 'template')):
self.fobos.set(mode, item[0], item[1])
else:
self.print_w('Error: mode "' + mode + '" uses as template the mode "' + self.fobos.get(mode, 'template') + '" that is NOT defined')
sys.exit(1)
return
def _get_local_variables(self):
"""
Get the definition of local variables defined into any sections (modes).
"""
if self.fobos:
for section in self.fobos.sections():
for item in self.fobos.items(section):
if item[0].startswith('$'):
self.local_variables[item[0]] = item[1].replace('\n', ' ')
return
def _substitute_local_variables_mode(self):
"""
Substitute the definition of local variables defined into the mode (section) selected.
"""
if self.fobos and self.mode:
self._substitute_local_variables_section(section=self.mode)
return
def _substitute_local_variables_section(self, section):
"""
Substitute the definition of local variables defined into a section.
"""
if self.fobos:
if self.fobos.has_section(section):
for item in self.fobos.items(section):
item_val = item[1]
for key, value in list(self.local_variables.items()):
item_val = re.sub(re.escape(key), value, item_val)
# item_val = re.sub(r"(?!" + re.escape(key) + r"[aZ_-])\s*" + re.escape(key) + r"\s*", value, item_val)
self.fobos.set(section, item[0], item_val)
return
def _check_local_variables(self):
"""
Get and substitute the definition of local variables defined into any sections (modes).
"""
if self.fobos:
self._get_local_variables()
if len(self.local_variables) > 0:
self._substitute_local_variables_mode()
return
def _set_cliargs_attributes(self, cliargs, cliargs_dict):
"""
Set attributes of cliargs from fobos options.
Parameters
----------
cliargs : argparse object
cliargs_dict : argparse object attributes dictionary
"""
if self.mode:
for item in self.fobos.items(self.mode):
if item[0] in cliargs_dict:
if isinstance(cliargs_dict[item[0]], bool):
setattr(cliargs, item[0], self.fobos.getboolean(self.mode, item[0]))
elif isinstance(cliargs_dict[item[0]], int):
setattr(cliargs, item[0], int(item[1]))
elif isinstance(cliargs_dict[item[0]], list):
setattr(cliargs, item[0], item[1].split())
else:
setattr(cliargs, item[0], item[1])
return
@staticmethod
def _check_cliargs_cflags(cliargs, cliargs_dict):
"""
Method for setting attribute of cliargs.
Parameters
----------
cliargs : argparse object
cliargs_dict : argparse object attributes dictionary
"""
for item in cliargs_dict:
if item in ['cflags', 'lflags', 'preproc']:
val_cli = cliargs_dict[item]
val_fobos = getattr(cliargs, item)
if item == 'cflags':
if val_cli == '-c':
match = re.search(r'(-c\s+|-c$)', val_fobos)
if match:
val_cli = '' # avoid multiple -c flags
if val_fobos and val_cli:
setattr(cliargs, item, val_fobos + ' ' + val_cli)
return
def _set_cliargs(self, cliargs):
"""
Set cliargs from fobos options.
Parameters
----------
cliargs : argparse object
"""
if self.fobos:
cliargs_dict = deepcopy(cliargs.__dict__)
self._set_mode(mode=cliargs.mode)
self._check_template()
self._check_local_variables()
self._set_cliargs_attributes(cliargs=cliargs, cliargs_dict=cliargs_dict)
self._check_cliargs_cflags(cliargs=cliargs, cliargs_dict=cliargs_dict)
return
def get(self, option, mode=None, toprint=True):
"""
Get options defined into the fobos file.
Parameters
----------
option : str
option name
mode : {None}
eventual mode name
toprint : {True}
return of the value: if toprint==False the value is return otherwise is printed to stdout
"""
value = ''
if self.fobos:
self._set_mode(mode=mode)
if self.fobos.has_option(self.mode, option):
value = self.fobos.get(self.mode, option)
if toprint:
# self.print_w(value)
print(value)
return
else:
return value
def get_output_name(self, mode=None, toprint=True):
"""
Method for building the output name accordingly to the fobos options.
Parameters
----------
mode : {None}
eventual mode name
toprint : {True}
return of the value: if toprint==False the value is return otherwise is printed to stdout
"""
output = ''
build_dir = self.get(option='build_dir', mode=mode, toprint=False)
mklib = self.get(option='mklib', mode=mode, toprint=False)
if self.fobos:
self._set_mode(mode=mode)
if self.fobos.has_option(self.mode, 'output'):
output = self.fobos.get(self.mode, 'output')
output = os.path.normpath(os.path.join(build_dir, output))
elif self.fobos.has_option(self.mode, 'target'):
output = self.fobos.get(self.mode, 'target')
output = os.path.splitext(os.path.basename(output))[0]
if mklib.lower() == 'shared':
output = output + '.so'
elif mklib.lower() == 'static':
output = output + '.a'
output = os.path.normpath(os.path.join(build_dir, output))
if toprint:
# self.print_w(output)
print(output)
return
else:
return output
def modes_list(self):
"""List defined modes."""
if self.fobos:
self.print_n('The fobos file defines the following modes:')
if self.fobos.has_option('modes', 'modes'):
modes = self.fobos.get('modes', 'modes').split()
for mode in modes:
if self.fobos.has_section(mode):
if self.fobos.has_option(mode, 'help'):
helpmsg = self.fobos.get(mode, 'help')
else:
helpmsg = ''
self.print_n(' - "' + mode + '" ' + helpmsg)
else:
self.print_w('Error: no modes are defined into the fobos file!')
sys.exit(1)
sys.exit(0)
return
@staticmethod
def print_template(cliargs):
"""
Print fobos template.
Parameters
----------
cliargs : argparse object
"""
print("[default]")
for argument in vars(cliargs):
attribute = getattr(cliargs, argument)
if isinstance(attribute, list):
attribute = ' '.join(attribute)
print(str(argument) + " = " + str(attribute))
def rules_list(self, quiet=False):
"""
Function for listing defined rules.
Parameters
----------
quiet : {False}
less verbose outputs than default
"""
if self.fobos:
self.print_n('The fobos file defines the following rules:')
for rule in self.fobos.sections():
if rule.startswith('rule-'):
if self.fobos.has_option(rule, 'help'):
helpmsg = self.fobos.get(rule, 'help')
else:
helpmsg = ''
self.print_n(' - "' + rule.split('rule-')[1] + '" ' + helpmsg)
if self.fobos.has_option(rule, 'quiet'):
quiet = self.fobos.getboolean(rule, 'quiet')
for rul in self.fobos.options(rule):
if rul.startswith('rule'):
if not quiet:
self.print_n(' Command => ' + self.fobos.get(rule, rul))
sys.exit(0)
return
def rule_execute(self, rule, quiet=False, log=False):
"""
Function for executing selected rule.
Parameters
----------
rule : str
rule name
quiet : {False}
less verbose outputs than default
log : {False}
bool for activate errors log saving
"""
if self.fobos:
self.print_n('Executing rule "' + rule + '"')
rule_name = 'rule-' + rule
if self.fobos.has_section(rule_name):
self._get_local_variables()
self._substitute_local_variables_section(section=rule_name)
results = []
quiet = False
log = False
if self.fobos.has_option(rule_name, 'quiet'):
quiet = self.fobos.getboolean(rule_name, 'quiet')
if self.fobos.has_option(rule_name, 'log'):
log = self.fobos.getboolean(rule_name, 'log')
for rul in self.fobos.options(rule_name):
if rul.startswith('rule'):
if not quiet:
self.print_n(' Command => ' + self.fobos.get(rule_name, rul))
result = syswork(self.fobos.get(rule_name, rul))
results.append(result)
if log:
check_results(results=results, log='rules_errors.log', print_w=self.print_w)
else:
check_results(results=results, print_w=self.print_w)
else:
self.print_w('Error: the rule "' + rule + '" is not defined into the fobos file. Defined rules are:')
self.rules_list(quiet=quiet)
sys.exit(1)
        return
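

# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of FoBiS.py): using a hypothetical fobos
# file, it shows in simplified form what the class above does when a mode is
# resolved: the first mode listed in [modes] is picked, the options of its
# "template" section are copied into it, and "$variable" definitions are
# substituted into the option values (see _set_mode, _check_template and
# _substitute_local_variables_section). Section and option names below are
# made up for the example.
if __name__ == '__main__':
  _example = """
[modes]
modes = gnu intel

[template-common]
$OPTIMIZE = -O2 -g
cflags = -c $OPTIMIZE

[gnu]
template = template-common
compiler = gnu

[intel]
template = template-common
compiler = intel
"""
  _cfg = configparser.RawConfigParser()
  _cfg.optionxform = str  # keep option names (and $variables) case sensitive
  _cfg.read_string(_example)  # Python 3 configparser assumed for read_string
  _mode = _cfg.get('modes', 'modes').split()[0]  # first mode listed -> 'gnu'
  for _opt, _val in _cfg.items(_cfg.get(_mode, 'template')):
    _cfg.set(_mode, _opt, _val)  # inherit the template options
  _local_vars = {_k: _v for _k, _v in _cfg.items(_mode) if _k.startswith('$')}
  for _opt, _val in _cfg.items(_mode):
    for _key, _value in _local_vars.items():
      _val = re.sub(re.escape(_key), _value, _val)  # expand $variables
    _cfg.set(_mode, _opt, _val)
  print(_cfg.get(_mode, 'cflags'))  # prints: -c -O2 -g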
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/gui/kivy/uix/dialogs/checkpoint_dialog.py
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_vtc.i18n import _
Builder.load_string('''
#:import _ electrum_vtc_gui.kivy.i18n._
<CheckpointDialog@Popup>
id: popup
title: _('Blockchain')
size_hint: 1, 1
cp_height: 0
cp_value: ''
BoxLayout:
orientation: 'vertical'
padding: '10dp'
spacing: '10dp'
TopLabel:
height: '48dp'
id: bc_height
text: _("Verified headers: %d blocks.")% app.num_blocks
TopLabel:
height: '48dp'
id: bc_status
text: _("Connected to %d nodes.")% app.num_nodes if app.num_nodes else _("Not connected?")
Widget:
size_hint: 1, 0.1
TopLabel:
text: _("In order to verify the history returned by your main server, Electrum downloads block headers from random nodes. These headers are then used to check that transactions sent by the server really are in the blockchain.")
font_size: '6pt'
Widget:
size_hint: 1, 0.1
GridLayout:
orientation: 'horizontal'
cols: 2
height: '36dp'
TopLabel:
text: _('Checkpoint') + ':'
height: '36dp'
TextInput:
id: height_input
multiline: False
input_type: 'number'
height: '36dp'
size_hint_y: None
text: '%d'%root.cp_height
on_focus: root.on_height_str()
TopLabel:
text: _('Block hash') + ':'
TxHashLabel:
data: root.cp_value
Widget:
size_hint: 1, 0.1
Label:
font_size: '6pt'
text: _('If there is a fork of the blockchain, you need to configure your checkpoint in order to make sure that you are on the correct side of the fork. Enter a block number to fetch a checkpoint from your main server, and check its value from independent sources.')
halign: 'left'
text_size: self.width, None
size: self.texture_size
Widget:
size_hint: 1, 0.3
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: _('Cancel')
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: _('OK')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(root.cp_height, root.cp_value)
popup.dismiss()
''')
class CheckpointDialog(Factory.Popup):
def __init__(self, network, callback):
Factory.Popup.__init__(self)
self.network = network
self.cp_height, self.cp_value = self.network.blockchain.get_checkpoint()
self.callback = callback
def on_height_str(self):
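        """Handle the height field losing focus: parse the requested height,
        fetch that block header from the network and store its hash as the
        proposed checkpoint value."""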
try:
new_height = int(self.ids.height_input.text)
except:
new_height = self.cp_height
self.ids.height_input.text = '%d'%new_height
if new_height == self.cp_height:
return
try:
header = self.network.synchronous_get(('blockchain.block.get_header', [new_height]), 5)
new_value = self.network.blockchain.hash_header(header)
except BaseException as e:
self.network.print_error(str(e))
new_value = ''
if new_value:
self.cp_height = new_height
            self.cp_value = new_value
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/plugins/about/dialogs/about.js
/*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.dialog.add( 'about', function( editor )
{
var lang = editor.lang.about;
return {
title : CKEDITOR.env.ie ? lang.dlgTitle : lang.title,
minWidth : 390,
minHeight : 230,
contents : [
{
id : 'tab1',
label : '',
title : '',
expand : true,
padding : 0,
elements :
[
{
type : 'html',
html :
'<style type="text/css">' +
'.cke_about_container' +
'{' +
'color:#000 !important;' +
'padding:10px 10px 0;' +
'margin-top:5px' +
'}' +
'.cke_about_container p' +
'{' +
'margin: 0 0 10px;' +
'}' +
'.cke_about_container .cke_about_logo' +
'{' +
'height:81px;' +
'background-color:#fff;' +
'background-image:url(' + CKEDITOR.plugins.get( 'about' ).path + 'dialogs/logo_ckeditor.png);' +
'background-position:center; ' +
'background-repeat:no-repeat;' +
'margin-bottom:10px;' +
'}' +
'.cke_about_container a' +
'{' +
'cursor:pointer !important;' +
'color:blue !important;' +
'text-decoration:underline !important;' +
'}' +
'</style>' +
'<div class="cke_about_container">' +
'<div class="cke_about_logo"></div>' +
'<p>' +
'CKEditor ' + CKEDITOR.version + ' (revision ' + CKEDITOR.revision + ')<br>' +
'<a href="http://ckeditor.com/">http://ckeditor.com</a>' +
'</p>' +
'<p>' +
lang.moreInfo + '<br>' +
'<a href="http://ckeditor.com/license">http://ckeditor.com/license</a>' +
'</p>' +
'<p>' +
lang.copy.replace( '$1', '<a href="http://cksource.com/">CKSource</a> - Frederico Knabben' ) +
'</p>' +
'</div>'
}
]
}
],
buttons : [ CKEDITOR.dialog.cancelButton ]
};
} );
/McStasScript-0.0.63.tar.gz/McStasScript-0.0.63/mcstasscript/data/pyvinylData.py
from libpyvinyl.BaseData import BaseData
from mcstasscript.data.McStasDataFormat import McStasFormat
from mcstasscript.data.MCPLDataFormat import MCPLDataFormat
class pyvinylMcStasData(BaseData):
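    """libpyvinyl BaseData wrapper for McStas simulation output.

    The single expected data field is "data" and the registered file format
    is McStasFormat; instances can be built from a file (`from_file`) or from
    a python dictionary (`from_dict`).
    """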
def __init__(
self,
key,
data_dict=None,
filename=None,
file_format_class=None,
file_format_kwargs=None,
):
expected_data = {}
### DataClass developer's job start
expected_data["data"] = None
### DataClass developer's job end
super().__init__(
key,
expected_data,
data_dict,
filename,
file_format_class,
file_format_kwargs,
)
@classmethod
def supported_formats(self):
format_dict = {}
### DataClass developer's job start
self._add_ioformat(format_dict, McStasFormat)
### DataClass developer's job end
return format_dict
@classmethod
def from_file(cls, filename: str, format_class, key, **kwargs):
"""Create the data class by the file in the `format`."""
return cls(
key,
filename=filename,
file_format_class=format_class,
file_format_kwargs=kwargs,
)
@classmethod
def from_dict(cls, data_dict, key):
"""Create the data class by a python dictionary."""
return cls(key, data_dict=data_dict)
class pyvinylMCPLData(BaseData):
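    """libpyvinyl BaseData wrapper for MCPL particle files.

    The class is file based only: the constructor passes the filename through
    to BaseData (data_dict and format arguments are ignored), always uses
    MCPLDataFormat, and defaults the filename to "none" so that it can be
    assigned later (see the comment in __init__).
    """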
def __init__(
self,
key,
data_dict=None,
# the filename can be assigned later.
# If filename == "" or None it fails the consistency check of BaseData
filename="none",
file_format_class=None,
file_format_kwargs=None,
):
expected_data = {}
super().__init__(key, expected_data, None, filename, MCPLDataFormat, None)
    @classmethod
    def supported_formats(self):
format_dict = {}
self._add_ioformat(format_dict, MCPLDataFormat)
return format_dict
@classmethod
def from_file(cls, filename: str, key="mcpl"):
        return cls(key, filename=filename)
/Azule_Hair_Transplant-0.0.1.tar.gz/Azule_Hair_Transplant-0.0.1/models/face_parsing/resnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
# from modules.bn import InPlaceABNSync as BatchNorm2d
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, in_chan, out_chan, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_chan, out_chan, stride)
self.bn1 = nn.BatchNorm2d(out_chan)
self.conv2 = conv3x3(out_chan, out_chan)
self.bn2 = nn.BatchNorm2d(out_chan)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
if in_chan != out_chan or stride != 1:
self.downsample = nn.Sequential(
nn.Conv2d(in_chan, out_chan,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_chan),
)
def forward(self, x):
residual = self.conv1(x)
residual = F.relu(self.bn1(residual))
residual = self.conv2(residual)
residual = self.bn2(residual)
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x)
out = shortcut + residual
out = self.relu(out)
return out
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
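    """Stack `bnum` BasicBlocks: only the first block applies `stride` and the
    channel change, the remaining blocks keep `out_chan` with stride 1."""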
layers = [BasicBlock(in_chan, out_chan, stride=stride)]
for i in range(bnum-1):
layers.append(BasicBlock(out_chan, out_chan, stride=1))
return nn.Sequential(*layers)
class Resnet18(nn.Module):
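    """ResNet-18 backbone without the classification head.

    `forward` returns the outputs of layer2, layer3 and layer4, i.e. feature
    maps at 1/8, 1/16 and 1/32 of the input resolution, and `init_weight`
    initialises the network from the pretrained checkpoint referenced by
    `resnet18_url`, skipping the final fully connected layer.
    """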
def __init__(self):
super(Resnet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
self.init_weight()
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.maxpool(x)
x = self.layer1(x)
feat8 = self.layer2(x) # 1/8
feat16 = self.layer3(feat8) # 1/16
feat32 = self.layer4(feat16) # 1/32
return feat8, feat16, feat32
def init_weight(self):
state_dict = modelzoo.load_url(resnet18_url)
self_state_dict = self.state_dict()
for k, v in state_dict.items():
if 'fc' in k: continue
self_state_dict.update({k: v})
self.load_state_dict(self_state_dict)
def get_params(self):
wd_params, nowd_params = [], []
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
                if module.bias is not None:
nowd_params.append(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nowd_params += list(module.parameters())
return wd_params, nowd_params
if __name__ == "__main__":
net = Resnet18()
x = torch.randn(16, 3, 224, 224)
out = net(x)
print(out[0].size())
print(out[1].size())
print(out[2].size())
    net.get_params()
/OASYS1-XRayServer-1.0.36.tar.gz/OASYS1-XRayServer-1.0.36/orangecontrib/xrayserver/widgets/xrayserver/ow_x0p.py
__author__ = "Luca Rebuffi"
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
import urllib
from http import server
from orangecontrib.xrayserver.util.xrayserver_util import HttpManager, XRayServerGui, XRAY_SERVER_URL, ShowHtmlDialog
from orangecontrib.xrayserver.widgets.gui.ow_xrayserver_widget import XrayServerWidget, XrayServerException
from oasys.util.oasys_util import ShowTextDialog
from PyQt5 import QtGui
from PyQt5.QtWebEngineWidgets import QWebEngineView as QWebView
APPLICATION = "/cgi/x0p_form.exe"
class X0p(XrayServerWidget):
name = "X0h Search"
description = "X0p"
icon = "icons/x0p.png"
maintainer = "Luca Rebuffi"
maintainer_email = "luca.rebuffi(@at@)elettra.eu"
priority = 2
category = "X0h"
keywords = ["data", "file", "load", "read"]
want_main_area = 1
xway = Setting(2)
wave = Setting(0.0)
line = Setting("Cu-Ka1")
code = Setting("Silicon")
hkl11 = Setting(-5)
hkl12 = Setting(-5)
hkl13 = Setting(-5)
hkl21 = Setting(5)
hkl22 = Setting(5)
hkl23 = Setting(5)
qb1 = Setting(0.0)
qb2 = Setting(90.0)
prcmin = Setting(0.0)
df1df2 = Setting(1)
base1 = Setting(1)
base2 = Setting(0)
base3 = Setting(0)
modesearch = Setting(0)
q1 = Setting(0.0)
q2 = Setting(180.0)
def __init__(self):
super().__init__()
left_box_1 = oasysgui.widgetBox(self.controlArea, "X0h-Search Request Form", addSpace=True, orientation="vertical",
width=400, height=630)
left_box_2 = oasysgui.widgetBox(left_box_1, "X-rays", addSpace=True, orientation="horizontal", width=380, height=110)
left_box_2_1 = oasysgui.widgetBox(left_box_2, "", addSpace=True, orientation="vertical", width=150, height=110)
gui.radioButtons(left_box_2_1, self, "xway", ["Wavelength (Å)", "Energy (keV)", "Characteristic line"], callback=self.set_xway )
self.box_wave = oasysgui.widgetBox(left_box_2, "", addSpace=True, orientation="vertical", width=190)
gui.separator(self.box_wave, height=10)
oasysgui.lineEdit(self.box_wave, self, "wave", label="", labelWidth=0, addSpace=False, valueType=float, orientation="horizontal")
self.box_line = oasysgui.widgetBox(left_box_2, "", addSpace=True, orientation="horizontal", width=190, height=110)
gui.separator(self.box_line, height=120)
XRayServerGui.combobox_text(self.box_line, self, "line", label="", labelWidth=0,
items=self.get_lines(),
sendSelectedValue=True, orientation="horizontal", selectedValue=self.line)
button = gui.button( self.box_line, self, "?", callback=self.help_lines)
button.setFixedWidth(15)
self.set_xway()
left_box_3 = oasysgui.widgetBox(left_box_1, "Crystal", addSpace=True, orientation="horizontal", width=380, height=60)
self.box_crystal = oasysgui.widgetBox(left_box_3, "", addSpace=True, orientation="horizontal", width=210)
XRayServerGui.combobox_text(self.box_crystal, self, "code", label="", labelWidth=0,
items=self.get_crystals(),
sendSelectedValue=True, orientation="horizontal", selectedValue=self.code)
button = gui.button( self.box_crystal, self, "?", callback=self.help_crystals)
button.setFixedWidth(15)
left_box_4 = oasysgui.widgetBox(left_box_1, "Bragg Planes Range", addSpace=True, orientation="horizontal", width=380, height=60)
oasysgui.lineEdit(left_box_4, self, "hkl11", label="From", labelWidth=50, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_4, self, "hkl12", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_4, self, "hkl13", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_4, self, "hkl21", label=" To", labelWidth=50, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_4, self, "hkl22", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_4, self, "hkl23", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
left_box_7 = oasysgui.widgetBox(left_box_1, "Bragg Angle Range", addSpace=True, orientation="horizontal", width=380, height=60)
oasysgui.lineEdit(left_box_7, self, "qb1", label="From", labelWidth=80, addSpace=False, valueType=float, orientation="horizontal")
oasysgui.lineEdit(left_box_7, self, "qb2", label=" To", labelWidth=80, addSpace=False, valueType=float, orientation="horizontal")
tab_central = oasysgui.tabWidget(left_box_1)
tab_1 = oasysgui.createTabPage(tab_central, "Intensity Control")
tab_2 = oasysgui.createTabPage(tab_central, "Find only Bragg planes making certain angles to the surface")
left_box_5 = oasysgui.widgetBox(tab_1, "", addSpace=True, orientation="vertical", width=370, height=250)
gui.separator(left_box_5)
oasysgui.lineEdit(left_box_5, self, "prcmin", label="Minimum |xh/x0| (%)", labelWidth=250, addSpace=False, valueType=float, orientation="horizontal")
left_box_5_1 = oasysgui.widgetBox(left_box_5, "Database Options for dispersion corrections df1, df2", addSpace=True, orientation="vertical", width=370, height=185)
gui.radioButtons(left_box_5_1, self, "df1df2", ["Auto (Henke at low energy, X0h at mid, Brennan-Cowan\nat high)",
"Use X0h data (5-25 keV or 0.5-2.5 A), recommended for\nBragg diffraction",
"Use Henke data (0.01-30 keV or 0.4-1200 A),\nrecommended for soft x-rays",
"Use Brennan-Cowan data (0.03-700 keV or 0.02-400 A)"])
left_box_6 = oasysgui.widgetBox(tab_2, "", addSpace=True, orientation="vertical", width=370, height=255)
gui.separator(left_box_6)
left_box_6_1 = oasysgui.widgetBox(left_box_6, "", addSpace=False, orientation="horizontal", width=370, height=30)
oasysgui.lineEdit(left_box_6_1, self, "base1", label="Surface Plane Indices", labelWidth=200, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_6_1, self, "base2", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
oasysgui.lineEdit(left_box_6_1, self, "base3", label=" ", labelWidth=1, addSpace=False, valueType=int, orientation="horizontal")
gui.radioButtons(left_box_6, self, "modesearch", ["Planes make angles from Theta1 to Theta2",
"Planes make angles from Theta1 to (Bragg_Angle - Theta2)",
"Planes make angles from (Bragg_Angle - Theta1)\nto (Bragg_Angle - Theta2)"])
gui.separator(left_box_6, height=10)
left_box_6_2 = oasysgui.widgetBox(left_box_6, "", addSpace=True, orientation="horizontal", width=370, height=30)
oasysgui.lineEdit(left_box_6_2, self, "q1", label="Theta1", labelWidth=80, addSpace=False, valueType=float, orientation="horizontal")
oasysgui.lineEdit(left_box_6_2, self, "q2", label=" Theta2", labelWidth=80, addSpace=False, valueType=float, orientation="horizontal")
button = gui.button(self.controlArea, self, "Find Planes!", callback=self.submit)
button.setFixedHeight(30)
gui.rubber(self.controlArea)
self.tabs_widget = oasysgui.tabWidget(self.mainArea)
self.tab_output = oasysgui.createTabPage(self.tabs_widget, "X-ray Server Ouput")
self.x0h_output = QWebView(self.tab_output)
self.tab_output.layout().addWidget(self.x0h_output)
self.x0h_output.setFixedHeight(640)
self.x0h_output.setFixedWidth(740)
def set_xway(self):
self.box_wave.setVisible(self.xway!=2)
self.box_line.setVisible(self.xway==2)
def submit(self):
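        """Collect the widget settings into the CGI parameters expected by
        /cgi/x0p_form.exe, send the GET request to the X-ray Server and show
        the trimmed HTML response in the embedded web view."""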
self.progressBarInit()
self.setStatusMessage("Submitting Request")
self.checkFields()
parameters = {}
parameters.update({"xway" : str(self.xway + 1)})
parameters.update({"wave" : str(self.wave)})
parameters.update({"line" : self.line})
parameters.update({"code" : self.code})
parameters.update({"hkl11" : str(self.hkl11)})
parameters.update({"hkl12" : str(self.hkl12)})
parameters.update({"hkl13" : str(self.hkl13)})
parameters.update({"hkl21" : str(self.hkl21)})
parameters.update({"hkl22" : str(self.hkl22)})
parameters.update({"hkl23" : str(self.hkl23)})
parameters.update({"qb1" : str(self.qb1)})
parameters.update({"qb2" : str(self.qb2)})
parameters.update({"prcmin" : str(self.prcmin)})
parameters.update({"df1df2" : self.decode_df1df2()})
parameters.update({"base1" : str(self.base1)})
parameters.update({"base2" : str(self.base2)})
parameters.update({"base3" : str(self.base3)})
parameters.update({"modesearch" : self.decode_modesearch()})
parameters.update({"q1" : str(self.q1)})
parameters.update({"q2" : str(self.q2)})
try:
response = HttpManager.send_xray_server_request_GET(APPLICATION, parameters)
response = response.split("<hr>")[0] + "\n </body></html>"
temp_1, temp_2 = response.split("style.css")
output = temp_1 + XRAY_SERVER_URL + "/style.css" + temp_2
response = response.split("<td><img src=\"images/x.gif\" width=31 height=32 border=0></td>")[0] + "</tr></tr></body></html>"
self.x0h_output.setHtml(response)
except urllib.error.HTTPError as e:
self.x0h_output.setHtml('The server couldn\'t fulfill the request.\nError Code: '
+ str(e.code) + "\n\n" +
server.BaseHTTPRequestHandler.responses[e.code][1])
except urllib.error.URLError as e:
self.x0h_output.setHtml('We failed to reach a server.\nReason: '
+ e.reason)
except XrayServerException as e:
ShowHtmlDialog.show_html("X-ray Server Error", e.response, width=750, height=500, parent=self)
except Exception as e:
ShowTextDialog.show_text("Error", 'Error Occurred.\nReason: ' + str(e), parent=self)
self.setStatusMessage("")
self.progressBarFinished()
def getLeftPartWidth(self):
return 415
def checkFields(self):
pass
def decode_df1df2(self):
if self.df1df2 == 0: return "-1"
elif self.df1df2 == 1: return "0"
elif self.df1df2 == 2: return "2"
elif self.df1df2 == 3: return "4"
def decode_modesearch(self):
if self.modesearch == 0: return "3"
elif self.modesearch == 1: return "2"
elif self.modesearch == 2: return "1"
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
w = X0p()
w.show()
app.exec()
    w.saveSettings()
/CellProfiler-4.2.6.tar.gz/CellProfiler-4.2.6/cellprofiler/modules/measureobjectoverlap.py
from functools import reduce
import centrosome.cpmorphology
import centrosome.fastemd
import centrosome.filter
import centrosome.index
import centrosome.propagate
import numpy
import scipy.ndimage
import scipy.sparse
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
from cellprofiler_core.module import Module
from cellprofiler_core.setting import Binary
from cellprofiler_core.setting.choice import Choice
from cellprofiler_core.setting.subscriber import LabelSubscriber
from cellprofiler_core.setting.text import Integer
from cellprofiler.modules import _help
C_IMAGE_OVERLAP = "Overlap"
FTR_F_FACTOR = "Ffactor"
FTR_PRECISION = "Precision"
FTR_RECALL = "Recall"
FTR_TRUE_POS_RATE = "TruePosRate"
FTR_FALSE_POS_RATE = "FalsePosRate"
FTR_FALSE_NEG_RATE = "FalseNegRate"
FTR_TRUE_NEG_RATE = "TrueNegRate"
FTR_RAND_INDEX = "RandIndex"
FTR_ADJUSTED_RAND_INDEX = "AdjustedRandIndex"
FTR_EARTH_MOVERS_DISTANCE = "EarthMoversDistance"
FTR_ALL = [
FTR_F_FACTOR,
FTR_PRECISION,
FTR_RECALL,
FTR_TRUE_POS_RATE,
FTR_TRUE_NEG_RATE,
FTR_FALSE_POS_RATE,
FTR_FALSE_NEG_RATE,
FTR_RAND_INDEX,
FTR_ADJUSTED_RAND_INDEX,
]
O_OBJ = "Segmented objects"
L_LOAD = "Loaded from a previous run"
L_CP = "From this CP pipeline"
DM_KMEANS = "K Means"
DM_SKEL = "Skeleton"
class MeasureObjectOverlap(Module):
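    """Measure the overlap between ground truth objects and test objects.

    For each image the module records pixel-based statistics comparing the
    two object sets (F-factor, precision, recall, true/false positive and
    negative rates, Rand index and adjusted Rand index) and, optionally, an
    approximate earth mover's distance between the segmentations.
    """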
category = "Measurement"
variable_revision_number = 2
module_name = "MeasureObjectOverlap"
def create_settings(self):
self.object_name_GT = LabelSubscriber(
"Select the objects to be used as the ground truth basis for calculating the amount of overlap",
"None",
doc="""\
Choose which set of objects will used as the “ground truth” objects. It
can be the product of segmentation performed by hand, or the result of
another segmentation algorithm whose results you would like to compare.
See the **Load** modules for more details on loading objects.""",
)
self.object_name_ID = LabelSubscriber(
"Select the objects to be tested for overlap against the ground truth",
"None",
doc="""\
This set of objects is what you will compare with the ground truth
objects. It is known as the “test object.”""",
)
self.wants_emd = Binary(
"Calculate earth mover's distance?",
False,
doc="""\
The earth mover’s distance computes the shortest distance that would
have to be travelled to move each foreground pixel in the test object to
some foreground pixel in the reference object. “Earth mover’s” refers to
an analogy: the pixels are “earth” that has to be moved by some machine
at the smallest possible cost.
It would take too much memory and processing time to compute the exact
earth mover’s distance, so **MeasureObjectOverlap** chooses
representative foreground pixels in each object and assigns each
foreground pixel to its closest representative. The earth mover’s
distance is then computed for moving the foreground pixels associated
with each representative in the test object to those in the reference
object.""",
)
self.max_points = Integer(
"Maximum # of points",
value=250,
minval=100,
doc="""\
*(Used only when computing the earth mover’s distance)*
This is the number of representative points that will be taken from the
foreground of the test objects and from the foreground of the reference
objects using the point selection method (see below).""",
)
self.decimation_method = Choice(
"Point selection method",
choices=[DM_KMEANS, DM_SKEL],
doc="""\
*(Used only when computing the earth mover’s distance)*
The point selection setting determines how the representative points
are chosen.
- *{DM_KMEANS}:* Select to pick representative points using a K-Means
clustering technique. The foregrounds of both objects are combined and
representatives are picked that minimize the distance to the nearest
representative. The same representatives are then used for the test
and reference objects.
- *{DM_SKEL}:* Select to skeletonize the object and pick points
equidistant along the skeleton.
|image0| *{DM_KMEANS}* is a choice that’s generally applicable to all
images. *{DM_SKEL}* is best suited to long, skinny objects such as
worms or neurites.
.. |image0| image:: {PROTIP_RECOMMEND_ICON}
""".format(
**{
"DM_KMEANS": DM_KMEANS,
"DM_SKEL": DM_SKEL,
"PROTIP_RECOMMEND_ICON": _help.PROTIP_RECOMMEND_ICON,
}
),
)
self.max_distance = Integer(
"Maximum distance",
value=250,
minval=1,
doc="""\
*(Used only when computing the earth mover’s distance)*
This setting sets an upper bound to the distance penalty assessed during
the movement calculation. As an example, the score for moving 10 pixels
from one location to a location that is 100 pixels away is 10\*100, but
if the maximum distance were set to 50, the score would be 10\*50
instead.
The maximum distance should be set to the largest reasonable distance
that pixels could be expected to move from one object to the next.""",
)
self.penalize_missing = Binary(
"Penalize missing pixels",
value=False,
doc="""\
*(Used only when computing the earth mover’s distance)*
If one object has more foreground pixels than the other, the earth
mover’s distance is not well-defined because there is no destination for
the extra source pixels or vice-versa. It’s reasonable to assess a
penalty for the discrepancy when comparing the accuracy of a
segmentation because the discrepancy represents an error. It’s also
reasonable to assess no penalty if the goal is to compute the cost of
movement, for example between two frames in a time-lapse movie, because
the discrepancy is likely caused by noise or artifacts in segmentation.
Set this setting to “Yes” to assess a penalty equal to the maximum
distance times the absolute difference in number of foreground pixels in
the two objects. Set this setting to “No” to assess no penalty.""",
)
def settings(self):
return [
self.object_name_GT,
self.object_name_ID,
self.wants_emd,
self.max_points,
self.decimation_method,
self.max_distance,
self.penalize_missing,
]
def visible_settings(self):
visible_settings = [self.object_name_GT, self.object_name_ID, self.wants_emd]
if self.wants_emd:
visible_settings += [
self.max_points,
self.decimation_method,
self.max_distance,
self.penalize_missing,
]
return visible_settings
def run(self, workspace):
object_name_GT = self.object_name_GT.value
objects_GT = workspace.get_objects(object_name_GT)
iGT, jGT, lGT = objects_GT.ijv.transpose()
object_name_ID = self.object_name_ID.value
objects_ID = workspace.get_objects(object_name_ID)
iID, jID, lID = objects_ID.ijv.transpose()
ID_obj = 0 if len(lID) == 0 else max(lID)
GT_obj = 0 if len(lGT) == 0 else max(lGT)
xGT, yGT = objects_GT.shape
xID, yID = objects_ID.shape
GT_pixels = numpy.zeros((xGT, yGT))
ID_pixels = numpy.zeros((xID, yID))
total_pixels = xGT * yGT
GT_pixels[iGT, jGT] = 1
ID_pixels[iID, jID] = 1
GT_tot_area = len(iGT)
if len(iGT) == 0 and len(iID) == 0:
intersect_matrix = numpy.zeros((0, 0), int)
else:
#
# Build a matrix with rows of i, j, label and a GT/ID flag
#
all_ijv = numpy.column_stack(
(
numpy.hstack((iGT, iID)),
numpy.hstack((jGT, jID)),
numpy.hstack((lGT, lID)),
numpy.hstack((numpy.zeros(len(iGT)), numpy.ones(len(iID)))),
)
)
#
# Order it so that runs of the same i, j are consecutive
#
order = numpy.lexsort((all_ijv[:, -1], all_ijv[:, 0], all_ijv[:, 1]))
all_ijv = all_ijv[order, :]
# Mark the first at each i, j != previous i, j
first = numpy.where(
numpy.hstack(
([True], ~numpy.all(all_ijv[:-1, :2] == all_ijv[1:, :2], 1), [True])
)
)[0]
# Count # at each i, j
count = first[1:] - first[:-1]
# First indexer - mapping from i,j to index in all_ijv
all_ijv_map = centrosome.index.Indexes([count])
# Bincount to get the # of ID pixels per i,j
id_count = numpy.bincount(all_ijv_map.rev_idx, all_ijv[:, -1]).astype(int)
gt_count = count - id_count
# Now we can create an indexer that has NxM elements per i,j
# where N is the number of GT pixels at that i,j and M is
# the number of ID pixels. We can then use the indexer to pull
# out the label values for each to populate a sparse array.
#
cross_map = centrosome.index.Indexes([id_count, gt_count])
off_gt = all_ijv_map.fwd_idx[cross_map.rev_idx] + cross_map.idx[0]
off_id = (
all_ijv_map.fwd_idx[cross_map.rev_idx]
+ cross_map.idx[1]
+ id_count[cross_map.rev_idx]
)
intersect_matrix = scipy.sparse.coo_matrix(
(numpy.ones(len(off_gt)), (all_ijv[off_id, 2], all_ijv[off_gt, 2])),
shape=(ID_obj + 1, GT_obj + 1),
).toarray()[1:, 1:]
gt_areas = objects_GT.areas
id_areas = objects_ID.areas
FN_area = gt_areas[numpy.newaxis, :] - intersect_matrix
all_intersecting_area = numpy.sum(intersect_matrix)
dom_ID = []
for i in range(0, ID_obj):
indices_jj = numpy.nonzero(lID == i)
indices_jj = indices_jj[0]
id_i = iID[indices_jj]
id_j = jID[indices_jj]
ID_pixels[id_i, id_j] = 1
for i in intersect_matrix: # loop through the GT objects first
if len(i) == 0 or max(i) == 0:
id = -1 # we missed the object; arbitrarily assign -1 index
else:
id = numpy.where(i == max(i))[0][0] # what is the ID of the max pixels?
dom_ID += [id] # for ea GT object, which is the dominating ID?
dom_ID = numpy.array(dom_ID)
for i in range(0, len(intersect_matrix.T)):
if len(numpy.where(dom_ID == i)[0]) > 1:
final_id = numpy.where(
intersect_matrix.T[i] == max(intersect_matrix.T[i])
)
final_id = final_id[0][0]
all_id = numpy.where(dom_ID == i)[0]
nonfinal = [x for x in all_id if x != final_id]
for (
n
) in nonfinal: # these others cannot be candidates for the corr ID now
intersect_matrix.T[i][n] = 0
else:
continue
TP = 0
FN = 0
FP = 0
for i in range(0, len(dom_ID)):
d = dom_ID[i]
if d == -1:
tp = 0
fn = id_areas[i]
fp = 0
else:
fp = numpy.sum(intersect_matrix[i][0:d]) + numpy.sum(
intersect_matrix[i][(d + 1) : :]
)
tp = intersect_matrix[i][d]
fn = FN_area[i][d]
TP += tp
FN += fn
FP += fp
TN = max(0, total_pixels - TP - FN - FP)
def nan_divide(numerator, denominator):
if denominator == 0:
return numpy.nan
return float(numerator) / float(denominator)
accuracy = nan_divide(TP, all_intersecting_area)
recall = nan_divide(TP, GT_tot_area)
precision = nan_divide(TP, (TP + FP))
F_factor = nan_divide(2 * (precision * recall), (precision + recall))
true_positive_rate = nan_divide(TP, (FN + TP))
false_positive_rate = nan_divide(FP, (FP + TN))
false_negative_rate = nan_divide(FN, (FN + TP))
true_negative_rate = nan_divide(TN, (FP + TN))
shape = numpy.maximum(
numpy.maximum(numpy.array(objects_GT.shape), numpy.array(objects_ID.shape)),
numpy.ones(2, int),
)
rand_index, adjusted_rand_index = self.compute_rand_index_ijv(
objects_GT.ijv, objects_ID.ijv, shape
)
m = workspace.measurements
m.add_image_measurement(self.measurement_name(FTR_F_FACTOR), F_factor)
m.add_image_measurement(self.measurement_name(FTR_PRECISION), precision)
m.add_image_measurement(self.measurement_name(FTR_RECALL), recall)
m.add_image_measurement(
self.measurement_name(FTR_TRUE_POS_RATE), true_positive_rate
)
m.add_image_measurement(
self.measurement_name(FTR_FALSE_POS_RATE), false_positive_rate
)
m.add_image_measurement(
self.measurement_name(FTR_TRUE_NEG_RATE), true_negative_rate
)
m.add_image_measurement(
self.measurement_name(FTR_FALSE_NEG_RATE), false_negative_rate
)
m.add_image_measurement(self.measurement_name(FTR_RAND_INDEX), rand_index)
m.add_image_measurement(
self.measurement_name(FTR_ADJUSTED_RAND_INDEX), adjusted_rand_index
)
def subscripts(condition1, condition2):
x1, y1 = numpy.where(GT_pixels == condition1)
x2, y2 = numpy.where(ID_pixels == condition2)
mask = set(zip(x1, y1)) & set(zip(x2, y2))
return list(mask)
TP_mask = subscripts(1, 1)
FN_mask = subscripts(1, 0)
FP_mask = subscripts(0, 1)
TN_mask = subscripts(0, 0)
TP_pixels = numpy.zeros((xGT, yGT))
FN_pixels = numpy.zeros((xGT, yGT))
FP_pixels = numpy.zeros((xGT, yGT))
TN_pixels = numpy.zeros((xGT, yGT))
def maskimg(mask, img):
for ea in mask:
img[ea] = 1
return img
TP_pixels = maskimg(TP_mask, TP_pixels)
FN_pixels = maskimg(FN_mask, FN_pixels)
FP_pixels = maskimg(FP_mask, FP_pixels)
TN_pixels = maskimg(TN_mask, TN_pixels)
if self.wants_emd:
emd = self.compute_emd(objects_ID, objects_GT)
m.add_image_measurement(
self.measurement_name(FTR_EARTH_MOVERS_DISTANCE), emd
)
if self.show_window:
workspace.display_data.true_positives = TP_pixels
workspace.display_data.true_negatives = TN_pixels
workspace.display_data.false_positives = FP_pixels
workspace.display_data.false_negatives = FN_pixels
workspace.display_data.statistics = [
(FTR_F_FACTOR, F_factor),
(FTR_PRECISION, precision),
(FTR_RECALL, recall),
(FTR_FALSE_POS_RATE, false_positive_rate),
(FTR_FALSE_NEG_RATE, false_negative_rate),
(FTR_RAND_INDEX, rand_index),
(FTR_ADJUSTED_RAND_INDEX, adjusted_rand_index),
]
if self.wants_emd:
workspace.display_data.statistics.append(
(FTR_EARTH_MOVERS_DISTANCE, emd)
)
# def compute_rand_index(self, test_labels, ground_truth_labels, mask):
# """Calculate the Rand Index
#
# http://en.wikipedia.org/wiki/Rand_index
#
# Given a set of N elements and two partitions of that set, X and Y
#
# A = the number of pairs of elements in S that are in the same set in
# X and in the same set in Y
# B = the number of pairs of elements in S that are in different sets
# in X and different sets in Y
# C = the number of pairs of elements in S that are in the same set in
# X and different sets in Y
# D = the number of pairs of elements in S that are in different sets
# in X and the same set in Y
#
# The rand index is: A + B
# -----
# A+B+C+D
#
#
# The adjusted rand index is the rand index adjusted for chance
# so as not to penalize situations with many segmentations.
#
# Jorge M. Santos, Mark Embrechts, "On the Use of the Adjusted Rand
# Index as a Metric for Evaluating Supervised Classification",
# Lecture Notes in Computer Science,
# Springer, Vol. 5769, pp. 175-184, 2009. Eqn # 6
#
# ExpectedIndex = best possible score
#
# ExpectedIndex = sum(N_i choose 2) * sum(N_j choose 2)
#
# MaxIndex = worst possible score = 1/2 (sum(N_i choose 2) + sum(N_j choose 2)) * total
#
# A * total - ExpectedIndex
# -------------------------
# MaxIndex - ExpectedIndex
#
# returns a tuple of the Rand Index and the adjusted Rand Index
# """
# ground_truth_labels = ground_truth_labels[mask].astype(numpy.uint64)
# test_labels = test_labels[mask].astype(numpy.uint64)
# if len(test_labels) > 0:
# #
# # Create a sparse matrix of the pixel labels in each of the sets
# #
# # The matrix, N(i,j) gives the counts of all of the pixels that were
# # labeled with label I in the ground truth and label J in the
# # test set.
# #
# N_ij = scipy.sparse.coo_matrix((numpy.ones(len(test_labels)),
# (ground_truth_labels, test_labels))).toarray()
#
# def choose2(x):
# '''Compute # of pairs of x things = x * (x-1) / 2'''
# return x * (x - 1) / 2
#
# #
# # Each cell in the matrix is a count of a grouping of pixels whose
# # pixel pairs are in the same set in both groups. The number of
# # pixel pairs is n * (n - 1), so A = sum(matrix * (matrix - 1))
# #
# A = numpy.sum(choose2(N_ij))
# #
# # B is the sum of pixels that were classified differently by both
# # sets. But the easier calculation is to find A, C and D and get
# # B by subtracting A, C and D from the N * (N - 1), the total
# # number of pairs.
# #
# # For C, we take the number of pixels classified as "i" and for each
# # "j", subtract N(i,j) from N(i) to get the number of pixels in
# # N(i,j) that are in some other set = (N(i) - N(i,j)) * N(i,j)
# #
# # We do the similar calculation for D
# #
# N_i = numpy.sum(N_ij, 1)
# N_j = numpy.sum(N_ij, 0)
# C = numpy.sum((N_i[:, numpy.newaxis] - N_ij) * N_ij) / 2
# D = numpy.sum((N_j[numpy.newaxis, :] - N_ij) * N_ij) / 2
# total = choose2(len(test_labels))
# # an astute observer would say, why bother computing A and B
# # when all we need is A+B and C, D and the total can be used to do
# # that. The calculations aren't too expensive, though, so I do them.
# B = total - A - C - D
# rand_index = (A + B) / total
# #
# # Compute adjusted Rand Index
# #
# expected_index = numpy.sum(choose2(N_i)) * numpy.sum(choose2(N_j))
# max_index = (numpy.sum(choose2(N_i)) + numpy.sum(choose2(N_j))) * total / 2
#
# adjusted_rand_index = \
# (A * total - expected_index) / (max_index - expected_index)
# else:
# rand_index = adjusted_rand_index = numpy.nan
# return rand_index, adjusted_rand_index
def compute_rand_index_ijv(self, gt_ijv, test_ijv, shape):
"""Compute the Rand Index for an IJV matrix
This is in part based on the Omega Index:
Collins, "Omega: A General Formulation of the Rand Index of Cluster
Recovery Suitable for Non-disjoint Solutions", Multivariate Behavioral
Research, 1988, 23, 231-242
The basic idea of the paper is that a pair should be judged to
agree only if the number of clusters in which they appear together
is the same.
"""
#
# The idea here is to assign a label to every pixel position based
# on the set of labels given to that position by both the ground
# truth and the test set. We then assess each pair of labels
# as agreeing or disagreeing as to the number of matches.
#
# First, add the backgrounds to the IJV with a label of zero
#
gt_bkgd = numpy.ones(shape, bool)
gt_bkgd[gt_ijv[:, 0], gt_ijv[:, 1]] = False
test_bkgd = numpy.ones(shape, bool)
test_bkgd[test_ijv[:, 0], test_ijv[:, 1]] = False
gt_ijv = numpy.vstack(
[
gt_ijv,
numpy.column_stack(
[
numpy.argwhere(gt_bkgd),
numpy.zeros(numpy.sum(gt_bkgd), gt_bkgd.dtype),
]
),
]
)
test_ijv = numpy.vstack(
[
test_ijv,
numpy.column_stack(
[
numpy.argwhere(test_bkgd),
numpy.zeros(numpy.sum(test_bkgd), test_bkgd.dtype),
]
),
]
)
#
# Create a unified structure for the pixels where a fourth column
# tells you whether the pixels came from the ground-truth or test
#
u = numpy.vstack(
[
numpy.column_stack(
[gt_ijv, numpy.zeros(gt_ijv.shape[0], gt_ijv.dtype)]
),
numpy.column_stack(
[test_ijv, numpy.ones(test_ijv.shape[0], test_ijv.dtype)]
),
]
)
#
# Sort by coordinates, then by identity
#
order = numpy.lexsort([u[:, 2], u[:, 3], u[:, 0], u[:, 1]])
u = u[order, :]
        # Get rid of any duplicate labellings (same point labeled twice with
        # the same label).
#
first = numpy.hstack([[True], numpy.any(u[:-1, :] != u[1:, :], 1)])
u = u[first, :]
#
# Create a 1-d indexer to point at each unique coordinate.
#
first_coord_idxs = numpy.hstack(
[
[0],
numpy.argwhere(
(u[:-1, 0] != u[1:, 0]) | (u[:-1, 1] != u[1:, 1])
).flatten()
+ 1,
[u.shape[0]],
]
)
first_coord_counts = first_coord_idxs[1:] - first_coord_idxs[:-1]
indexes = centrosome.index.Indexes([first_coord_counts])
#
# Count the number of labels at each point for both gt and test
#
count_test = numpy.bincount(indexes.rev_idx, u[:, 3]).astype(numpy.int64)
count_gt = first_coord_counts - count_test
#
# For each # of labels, pull out the coordinates that have
# that many labels. Count the number of similarly labeled coordinates
# and record the count and labels for that group.
#
labels = []
for i in range(1, numpy.max(count_test) + 1):
for j in range(1, numpy.max(count_gt) + 1):
match = (count_test[indexes.rev_idx] == i) & (
count_gt[indexes.rev_idx] == j
)
if not numpy.any(match):
continue
#
# Arrange into an array where the rows are coordinates
# and the columns are the labels for that coordinate
#
lm = u[match, 2].reshape(numpy.sum(match) // (i + j), i + j)
#
# Sort by label.
#
order = numpy.lexsort(lm.transpose())
lm = lm[order, :]
#
# Find indices of unique and # of each
#
lm_first = numpy.hstack(
[
[0],
numpy.argwhere(numpy.any(lm[:-1, :] != lm[1:, :], 1)).flatten()
+ 1,
[lm.shape[0]],
]
)
lm_count = lm_first[1:] - lm_first[:-1]
for idx, count in zip(lm_first[:-1], lm_count):
labels.append((count, lm[idx, :j], lm[idx, j:]))
#
# We now have our sets partitioned. Do each against each to get
# the number of true positive and negative pairs.
#
max_t_labels = reduce(max, [len(t) for c, t, g in labels], 0)
max_g_labels = reduce(max, [len(g) for c, t, g in labels], 0)
#
# tbl is the contingency table from Table 4 of the Collins paper
# It's a table of the number of pairs which fall into M sets
# in the ground truth case and N in the test case.
#
tbl = numpy.zeros(((max_t_labels + 1), (max_g_labels + 1)))
for i, (c1, tobject_numbers1, gobject_numbers1) in enumerate(labels):
for j, (c2, tobject_numbers2, gobject_numbers2) in enumerate(labels[i:]):
nhits_test = numpy.sum(
tobject_numbers1[:, numpy.newaxis]
== tobject_numbers2[numpy.newaxis, :]
)
nhits_gt = numpy.sum(
gobject_numbers1[:, numpy.newaxis]
== gobject_numbers2[numpy.newaxis, :]
)
if j == 0:
N = c1 * (c1 - 1) / 2
else:
N = c1 * c2
tbl[nhits_test, nhits_gt] += N
N = numpy.sum(tbl)
#
# Equation 13 from the paper
#
min_JK = min(max_t_labels, max_g_labels) + 1
rand_index = numpy.sum(tbl[:min_JK, :min_JK] * numpy.identity(min_JK)) / N
#
# Equation 15 from the paper, the expected index
#
e_omega = (
numpy.sum(
numpy.sum(tbl[:min_JK, :min_JK], 0)
* numpy.sum(tbl[:min_JK, :min_JK], 1)
)
/ N ** 2
)
#
# Equation 16 is the adjusted index
#
adjusted_rand_index = (rand_index - e_omega) / (1 - e_omega)
return rand_index, adjusted_rand_index
def compute_emd(self, src_objects, dest_objects):
"""Compute the earthmovers distance between two sets of objects
src_objects - move pixels from these objects
dest_objects - move pixels to these objects
returns the earth mover's distance
"""
#
# if either foreground set is empty, the emd is the penalty.
#
for angels, demons in (
(src_objects, dest_objects),
(dest_objects, src_objects),
):
if angels.count == 0:
if self.penalize_missing:
return numpy.sum(demons.areas) * self.max_distance.value
else:
return 0
if self.decimation_method == DM_KMEANS:
isrc, jsrc = self.get_kmeans_points(src_objects, dest_objects)
idest, jdest = isrc, jsrc
else:
isrc, jsrc = self.get_skeleton_points(src_objects)
idest, jdest = self.get_skeleton_points(dest_objects)
src_weights, dest_weights = [
self.get_weights(i, j, self.get_labels_mask(objects))
for i, j, objects in (
(isrc, jsrc, src_objects),
(idest, jdest, dest_objects),
)
]
ioff, joff = [
src[:, numpy.newaxis] - dest[numpy.newaxis, :]
for src, dest in ((isrc, idest), (jsrc, jdest))
]
c = numpy.sqrt(ioff * ioff + joff * joff).astype(numpy.int32)
c[c > self.max_distance.value] = self.max_distance.value
extra_mass_penalty = self.max_distance.value if self.penalize_missing else 0
return centrosome.fastemd.emd_hat_int32(
src_weights.astype(numpy.int32),
dest_weights.astype(numpy.int32),
c,
extra_mass_penalty=extra_mass_penalty,
)
def get_labels_mask(self, obj):
labels_mask = numpy.zeros(obj.shape, bool)
for labels, indexes in obj.get_labels():
labels_mask = labels_mask | labels > 0
return labels_mask
def get_skeleton_points(self, obj):
"""Get points by skeletonizing the objects and decimating"""
ii = []
jj = []
total_skel = numpy.zeros(obj.shape, bool)
for labels, indexes in obj.get_labels():
colors = centrosome.cpmorphology.color_labels(labels)
for color in range(1, numpy.max(colors) + 1):
labels_mask = colors == color
skel = centrosome.cpmorphology.skeletonize(
labels_mask,
ordering=scipy.ndimage.distance_transform_edt(labels_mask)
* centrosome.filter.poisson_equation(labels_mask),
)
total_skel = total_skel | skel
n_pts = numpy.sum(total_skel)
if n_pts == 0:
return numpy.zeros(0, numpy.int32), numpy.zeros(0, numpy.int32)
i, j = numpy.where(total_skel)
if n_pts > self.max_points.value:
#
# Decimate the skeleton by finding the branchpoints in the
# skeleton and propagating from those.
#
markers = numpy.zeros(total_skel.shape, numpy.int32)
branchpoints = centrosome.cpmorphology.branchpoints(
total_skel
) | centrosome.cpmorphology.endpoints(total_skel)
markers[branchpoints] = numpy.arange(numpy.sum(branchpoints)) + 1
#
# We compute the propagation distance to that point, then impose
            # a slightly arbitrary order to get an unambiguous ordering
# which should number the pixels in a skeleton branch monotonically
#
ts_labels, distances = centrosome.propagate.propagate(
numpy.zeros(markers.shape), markers, total_skel, 1
)
order = numpy.lexsort((j, i, distances[i, j], ts_labels[i, j]))
#
# Get a linear space of self.max_points elements with bounds at
# 0 and len(order)-1 and use that to select the points.
#
order = order[
numpy.linspace(0, len(order) - 1, self.max_points.value).astype(int)
]
return i[order], j[order]
return i, j
def get_kmeans_points(self, src_obj, dest_obj):
"""Get representative points in the objects using K means
src_obj - get some of the foreground points from the source objects
dest_obj - get the rest of the foreground points from the destination
objects
returns a vector of i coordinates of representatives and a vector
of j coordinates
"""
from sklearn.cluster import KMeans
ijv = numpy.vstack((src_obj.ijv, dest_obj.ijv))
if len(ijv) <= self.max_points.value:
return ijv[:, 0], ijv[:, 1]
random_state = numpy.random.RandomState()
random_state.seed(ijv.astype(int).flatten())
kmeans = KMeans(
n_clusters=self.max_points.value, tol=2, random_state=random_state
)
kmeans.fit(ijv[:, :2])
return (
kmeans.cluster_centers_[:, 0].astype(numpy.uint32),
kmeans.cluster_centers_[:, 1].astype(numpy.uint32),
)
def get_weights(self, i, j, labels_mask):
"""Return the weights to assign each i,j point
Assign each pixel in the labels mask to the nearest i,j and return
the number of pixels assigned to each i,j
"""
#
# Create a mapping of chosen points to their index in the i,j array
#
total_skel = numpy.zeros(labels_mask.shape, int)
total_skel[i, j] = numpy.arange(1, len(i) + 1)
#
# Compute the distance from each chosen point to all others in image,
# return the nearest point.
#
ii, jj = scipy.ndimage.distance_transform_edt(
total_skel == 0, return_indices=True, return_distances=False
)
#
# Filter out all unmasked points
#
ii, jj = [x[labels_mask] for x in (ii, jj)]
if len(ii) == 0:
return numpy.zeros(0, numpy.int32)
#
# Use total_skel to look up the indices of the chosen points and
# bincount the indices.
#
result = numpy.zeros(len(i), numpy.int32)
bc = numpy.bincount(total_skel[ii, jj])[1:]
result[: len(bc)] = bc
return result
def display(self, workspace, figure):
"""Display the image confusion matrix & statistics"""
figure.set_subplots((3, 2))
for x, y, image, label in (
(0, 0, workspace.display_data.true_positives, "True positives"),
(0, 1, workspace.display_data.false_positives, "False positives"),
(1, 0, workspace.display_data.false_negatives, "False negatives"),
(1, 1, workspace.display_data.true_negatives, "True negatives"),
):
figure.subplot_imshow_bw(
x, y, image, title=label, sharexy=figure.subplot(0, 0)
)
figure.subplot_table(
2,
0,
workspace.display_data.statistics,
col_labels=("Measurement", "Value"),
n_rows=2,
)
def measurement_name(self, feature):
return "_".join(
(
C_IMAGE_OVERLAP,
feature,
self.object_name_GT.value,
self.object_name_ID.value,
)
)
def get_categories(self, pipeline, object_name):
if object_name == "Image":
return [C_IMAGE_OVERLAP]
return []
def get_measurements(self, pipeline, object_name, category):
if object_name == "Image" and category == C_IMAGE_OVERLAP:
return self.all_features()
return []
def get_measurement_images(self, pipeline, object_name, category, measurement):
if measurement in self.get_measurements(pipeline, object_name, category):
return [self.test_img.value]
return []
def get_measurement_scales(
self, pipeline, object_name, category, measurement, image_name
):
if (
object_name == "Image"
and category == C_IMAGE_OVERLAP
and measurement in FTR_ALL
):
return ["_".join((self.object_name_GT.value, self.object_name_ID.value))]
return []
def all_features(self):
all_features = list(FTR_ALL)
if self.wants_emd:
all_features.append(FTR_EARTH_MOVERS_DISTANCE)
return all_features
def get_measurement_columns(self, pipeline):
return [
("Image", self.measurement_name(feature), COLTYPE_FLOAT,)
for feature in self.all_features()
] | PypiClean |
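# Illustrative sketch (not part of the module above): the pixel-accuracy statistics
# computed above reduce to ordinary confusion-matrix arithmetic with a NaN-safe
# division. The boolean masks `gt_mask` and `id_mask` below are hypothetical
# stand-ins for the ground-truth and test foreground pixels.
import numpy

def nan_divide(numerator, denominator):
    # Return NaN instead of raising on a zero denominator, as in the module above.
    if denominator == 0:
        return numpy.nan
    return float(numerator) / float(denominator)

def pixel_confusion_stats(gt_mask, id_mask):
    # Count true/false positives/negatives between two boolean pixel masks.
    TP = numpy.sum(gt_mask & id_mask)
    FP = numpy.sum(~gt_mask & id_mask)
    FN = numpy.sum(gt_mask & ~id_mask)
    TN = numpy.sum(~gt_mask & ~id_mask)
    precision = nan_divide(TP, TP + FP)
    recall = nan_divide(TP, TP + FN)
    f_factor = nan_divide(2 * precision * recall, precision + recall)
    return {
        "precision": precision,
        "recall": recall,
        "F_factor": f_factor,
        "true_positive_rate": nan_divide(TP, TP + FN),
        "false_positive_rate": nan_divide(FP, FP + TN),
        "false_negative_rate": nan_divide(FN, FN + TP),
        "true_negative_rate": nan_divide(TN, FP + TN),
    }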
/Mopidy-Mopify-1.7.3.tar.gz/Mopidy-Mopify-1.7.3/mopidy_mopify/static/debug/src/vendor/angular-notifier/dist/angular-notifier.min.js | !function(){"use strict";angular.module("llNotifier",["ngAnimate"]).value("llNotificationsTemplateUrl","src/notifications.html").value("llNotificationTemplateUrl","src/notification.html").constant("llConstants",{DEFAULT_NOTIFICATION_TYPE:"default",DEFAULT_NOTIFICATION_POSITION:"top center",DEFAULT_DELAY:3e3,FADE_DELAY:1e3})}(),function(){"use strict";angular.module("llNotifier").factory("NotificationDecorator",function(){function a(){}return a.toObject=function(a){var b=a;return"object"!=typeof b&&(b={template:b}),b},a}).factory("Notification",["$compile","$timeout","llConstants","NotificationDecorator",function(a,b,c,d){function e(a){var b=d.toObject(a);this.template=b.template?b.template:"",this.type=b.type?b.type:c.DEFAULT_NOTIFICATION_TYPE,this.position=b.position?b.position:c.DEFAULT_NOTIFICATION_POSITION,this.hasDelay=angular.isUndefined(b.hasDelay)?!0:b.hasDelay===!0,this.delay=angular.isDefined(b.delay)?b.delay:c.DEFAULT_DELAY,this.scope=angular.isDefined(b.scope)?b.scope:{}}return e.prototype={timeout:function(a){this.hasDelay&&b(function(){a()},this.delay)}},e}])}(),function(){"use strict";angular.module("llNotifier").service("notifier",["$http","$rootScope","$templateCache","$compile","Notification","llNotificationsTemplateUrl",function(a,b,c,d,e,f){function g(b){a.get(f,{cache:c}).success(function(a){b(a)}).error(function(a){throw new Error("Template specified for llNotifier ("+f+") could not be loaded. "+a)})}function h(a,b){var c=d(a)(b),e=angular.element(document).find("body");e.append(c)}this.scope=b.$new(),this.scope.notifications=[],this.isFirstNotification=!0,this.notify=function(a){var b=this,c=new e(a);b.scope.notifications.push(c),b.isFirstNotification&&g(function(a){h(a,b.scope),b.isFirstNotification=!1})}}])}(),function(){"use strict";angular.module("llNotifier").directive("llNotification",["$timeout","llNotificationTemplateUrl","llConstants",function(a,b,c){return{scope:!0,restrict:"E",templateUrl:b,transclude:!0,link:function(b){var d=b.notification;d.isShown=!0;var e=function(){b.notification.isShown=!1,a(function(){for(var a=b.$parent.notifications,c=a.length-1;c>=0;c--)a[c].isShown||b.$parent.notifications.splice(c,1)},c.FADE_DELAY)};b.closeNotification=e,d.timeout(e),d.isCentered=-1!==d.position.indexOf("center")}}}]).directive("llNotificationContent",["$compile",function(a){function b(a){this.scope=a}function c(a){this.template=a}return b.prototype={populateWith:function(a){if("object"==typeof a)for(var b in a)this.scope[b]=a[b];return this.scope}},c.prototype={toTemplate:function(){return"<div>"+this.template+"</div>"}},{scope:!0,restrict:"E",transclude:!0,link:function(d,e){d=new b(d).populateWith(d.notification.scope),e.replaceWith(a(new c(d.notification.template).toTemplate())(d))}}}])}(),angular.module("llNotifier").run(["$templateCache",function(a){"use strict";a.put("src/notification.html",'<div class="notifier-msg {{notification.position}}" ng-show="notification.isShown" ng-cloak>\n <div class="notifier-msg-content {{notification.type}}" ng-class="{center: notification.isCentered}">\n <div ng-transclude></div>\n <button class="notifier-button notifier-button-close" ng-click="closeNotification()">x</button>\n </div>\n</div>\n'),a.put("src/notifications.html",'<ll-notification ng-repeat="notification in notifications" type="notification.type" position="notification.position" has-delay="notification.hasDelay" 
delay="notification.delay">\n <ll-notification-content></ll-notification-content>\n</ll-notification>\n')}]); | PypiClean |
/Khooshe-0.2.tar.gz/Khooshe-0.2/khooshe/khooshe.py | import os
import shutil
import csv
import numpy as np
from scipy.cluster.vq import kmeans, vq
def remove_tiles_folder(tile_name):
'''
'''
if os.path.exists(tile_name):
shutil.rmtree(tile_name)
def create_folder(folder_name):
'''
'''
if os.path.exists(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name)
def get_points_count(points_file):
'''
'''
with open(points_file) as f:
count = sum(1 for line in f)
return count
def read_point_data(points_file):
'''
'''
tmp1 = []
tmp2 = []
with open(points_file, 'rU') as csv_f:
reader = csv.reader(csv_f)
try:
for index, row in enumerate(reader):
if row[0] and row[1] and row[2]:
tmp1.append([float(row[0]), float(row[1])])
tmp2.append([float(row[0]), float(row[1]), row[2]])
except:
raise Exception("Cannot read data from point text file.")
return tmp1, tmp2
def unique_array(point_array):
'''
'''
a = np.ascontiguousarray(point_array)
unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def init_dictionary(tile_name):
'''
'''
with open('{0}/dict.csv'.format(tile_name), 'w') as dic_csv:
writer = csv.writer(dic_csv)
writer.writerow(['folder', 'file', 'extent'])
def make_dictionary(temp, tile_name):
'''
'''
with open('{0}/dict.csv'.format(tile_name), 'a') as dic_csv:
writer = csv.writer(dic_csv)
for each in temp:
writer.writerow([each[0], each[1], "{0}, {1}, {2}, {3}".format(min(each[3]), min(each[2]), max(each[3]), max(each[2]))])
def make_first_layer(unique_points, centroids_number, tile_name):
'''
'''
new_data = {}
centroids, _ = kmeans(unique_points, centroids_number)
idx, _ = vq(unique_points, centroids)
shapes = []
temp = []
for each in range(len(centroids)):
points = unique_points[idx==each]
new_data['{0}'.format(each)] = points
shapes.append(points.shape[0])
create_folder('{0}/0'.format(tile_name))
with open('{0}/0/0.csv'.format(tile_name),'w') as csv_n:
writer = csv.writer(csv_n, delimiter=',')
writer.writerow(['latitude', 'longitude', 'label', 'info'])
temp_lat = []
temp_lon = []
for index, centroid in enumerate(centroids):
writer.writerow([centroid[0], centroid[1], shapes[index], shapes[index]])
temp_lat.append(centroid[0])
temp_lon.append(centroid[1])
temp.append([0, 0, temp_lat, temp_lon])
init_dictionary(tile_name)
make_dictionary(temp, tile_name)
return centroids, shapes, new_data
def make_rest_of_layers(data, centroids, shapes, centroids_number, tile_name, point_dict):
'''
'''
count = 1
temp = []
while True:
create_folder('{0}/{1}'.format(tile_name, count))
new_datas = {}
for key in data.keys():
if data[key].shape[0] < 10:
with open('{0}/{1}/{2}.csv'.format(tile_name, count, key), 'w') as csv_n:
writer = csv.writer(csv_n)
writer.writerow(['latitude', 'longitude', 'label', 'info'])
for point in data[key]:
info = point_dict["{0}_{1}".format(point[0], point[1])]
writer.writerow([point[0], point[1], 'p', info])
temp.append([count, key, [point[0]], [point[1]]])
else:
centroids,_ = kmeans(data[key], centroids_number)
idx,_ = vq(data[key],centroids)
for each in range(len(centroids)):
points = data[key][idx==each]
new_datas['{0}_{1}'.format(key, each)] = points
shapes.append(points.shape[0])
with open('{0}/{1}/{2}.csv'.format(tile_name, count, key), 'w') as csv_n:
writer = csv.writer(csv_n)
writer.writerow(['latitude', 'longitude', 'label', 'info'])
temp_lat = []
temp_lon = []
for a, centroid in enumerate(centroids):
if shapes[a] > 1:
writer.writerow([centroid[0], centroid[1], shapes[a], shapes[a]])
temp_lat.append(centroid[0])
temp_lon.append(centroid[1])
if len(temp_lat) >= 1 and len(temp_lon) >= 1:
temp.append([count, key, temp_lat, temp_lon])
shapes = []
data = 0
data = new_datas
new_datas = 0
count += 1
make_dictionary(temp, tile_name)
if data == {}:
break
def run_khooshe(points_obj, points_file, tile_name):
    '''
    Build the Khooshe tile pyramid for a set of points.

    Parameters:
        points_obj: list
            list of [latitude, longitude] points, used when points_file is not given
        points_file: string
            path to a CSV file with latitude, longitude and label columns, ex: sample_points.csv
        tile_name: string
            name of the output folder that will hold the generated tiles, ex: tiles
    '''
CENTROIDS_NUMBER = 15
remove_tiles_folder(tile_name)
if points_file:
points_count = get_points_count(points_file)
point_array, point_array2 = read_point_data(points_file)
        print("Reading points --> DONE.")
else:
point_array = points_obj
point_dict = {}
for point in point_array2:
point_dict['{0}_{1}'.format(point[0], point[1])] = str(point[2])
unique_points = unique_array(point_array)
    print("Finding unique points --> DONE.")
centroids, shapes, new_data = make_first_layer(unique_points, CENTROIDS_NUMBER, tile_name)
make_rest_of_layers(new_data, centroids, shapes, CENTROIDS_NUMBER, tile_name, point_dict)
    print("Creating layers --> DONE.")
if __name__ == '__main__':
run_khooshe(None, 'sample_points.csv', 'tiles') | PypiClean |
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/QUICKSTART.md | # Quick Start Guide
1. Get CloudFerry sources
```
git clone https://github.com/MirantisWorkloadMobility/CloudFerry.git
cd CloudFerry
# The most recent code is in devel branch
git checkout -b devel origin/devel
```
2. Install vagrant (devlab requires vagrant version >= 1.6)
```
wget https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.4_x86_64.deb
sudo dpkg -i vagrant_1.7.4_x86_64.deb
```
3. Install virtualbox hypervisor
```
sudo apt-get install virtualbox -y
```
4. Setup development environment
```
cd CloudFerry/cloudferry_devlab
vagrant up grizzly icehouse nfs
```
5. Setup virtual environment for cloudferry
```
apt-get install python-dev libssl-dev python-virtualenv libffi-dev -y
cd CloudFerry
virtualenv .venv
source .venv/bin/activate
pip install -r requirements.txt
pip install -r test-requirements.txt
```
6. Generate cloudferry config for development lab
```
cd CloudFerry
./cloudferry_devlab/provision/generate_config.sh --cloudferry-path $(pwd)
```
7. Generate load on source VM (this will create a number of VMs on grizzly node)
```
cd CloudFerry/
source ./cloudferry_devlab/tests/openrc.example
python ./cloudferry_devlab/generate_load.py
```
8. Run migration
```
cd CloudFerry
source .venv/bin/activate
cloudferry migrate configuration.ini --debug
```
| PypiClean |
/CC-dbgen-0.2.0.tar.gz/CC-dbgen-0.2.0/dbgen/inputs/rxn.py | from dbgen.support.datatypes.table import Table,Col,FK
from dbgen.support.datatypes.rule import Rule,Plan,PureSQL,SimpleUpdate
from dbgen.support.datatypes.misc import Arg,noIndex
from dbgen.support.datatypes.sqltypes import Varchar,Decimal
from dbgen.support.utils import mkInsCmd
##########################################################################################
stoich = Table('stoich'
,desc = 'Mapping table between jobs and reactions'
,cols = [Col('job_id', pk=True)
,Col('rxn_id', pk=True)
,Col('stoich', nn=True)]
,fks = [FK('job_id','relax_job')
,FK('rxn_id','rxn')])
rxn = Table('rxn'
,desc = 'Any reaction'
,cols = [Col('rxn_id', pk=True,auto=True)
,Col('name', Varchar(), nn=True,uniq=True)
,Col('is_adsorption', nn=True)
,Col('dE', Decimal())])
ads_triple = Table('ads_triple'
,desc='Triples of bare + complex + adsorbate'
,cols = [Col('bare_id', pk=True)
,Col('complex_id', pk=True)
,Col('adsorbate_id', pk=True)
,Col('delta_e_surf',Decimal(), nn=True)
,Col('calc_id', nn=True)
,Col('metal_comp',Varchar(), nn=True)
]
,fks = [FK('bare_id', 'relax_job', 'job_id')
,FK('complex_id', 'relax_job', 'job_id')
,FK('adsorbate_id', 'adsorbate', 'adsorbate_id')
,FK('calc_id', 'calc', 'calc_id')
])
tables = [stoich,rxn,ads_triple]
##########################################################################################
##########################################################################################
##########################################################################################
rxn_energies = Rule('rxn_energies'
,query = """SELECT S.rxn_id,SUM(T.energy * S.stoich) AS dE
FROM stoich AS S
JOIN rxn AS R USING (rxn_id)
JOIN finaltraj AS T USING (job_id)
WHERE R.dE IS NULL
GROUP BY S.rxn_id """
,plan = Plan([SimpleUpdate(rxn,['dE'])]))
################################################
# Pairs of surfaces that differ by an adsorbate
#################################################
pop_ads_triple=Rule('ads_triple'
,plan=Plan(PureSQL(["""
INSERT INTO ads_triple (bare_id
,complex_id
,adsorbate_id
,delta_e_surf
,calc_id
,metal_comp)
SELECT X.bare_id
,X.complex_id
,A.adsorbate_id
,X.delta_e_surf
,X.calc_id
,X.metal_comp
FROM
(SELECT
F1.job_id AS complex_id
,F2.job_id AS bare_id
,MIN(F1.energy - F2.energy) AS delta_e_surf -- MIN actually doesn't matter - schema doesn't know Finaltraj as 1-1 relationship
,J1.calc_id AS calc_id
,MIN(S1.metal_comp) AS metal_comp -- MIN actually doesn't matter
, GROUP_CONCAT(CONCAT('[',C.element_id,
',',C.count - COALESCE(C2.count, 0),
']')
ORDER BY C.element_id ASC) AS composition
FROM
finaltraj F1
JOIN struct AS S1 ON F1.struct_id=S1.struct_id
JOIN surface AS SS1 ON S1.struct_id=SS1.struct_id
JOIN relax_job AS J1 ON F1.job_id=J1.job_id
JOIN struct AS S2 USING (metal_comp)
JOIN finaltraj AS F2 ON F2.struct_id=S2.struct_id
JOIN relax_job AS J2 ON F2.job_id=J2.job_id AND J2.calc_id=J1.calc_id
JOIN surface AS SS2 ON S2.struct_id=SS2.struct_id
JOIN struct_composition AS C ON (C.struct_id=S1.struct_id)
LEFT JOIN struct_composition AS C2 ON C2.struct_id = S2.struct_id AND C2.element_id=C.element_id
WHERE
-- (F1.job_id,F2.job_id) NOT IN (SELECT complex_id,bare_id FROM ads_triple) AND
S1.n_atoms > S2.n_atoms
AND S1.n_elems >= S2.n_elems
AND C.element_id IN (1 , 2, 6, 7, 8) -- already know they have same metal stoich -
AND C.count - COALESCE(C2.count, 0) != 0 -- ignore in case the have the same # of some nonmetal
GROUP BY complex_id,bare_id) AS X
JOIN adsorbate AS A ON A.composition = X.composition
ON DUPLICATE KEY UPDATE bare_id=X.bare_id"""])))
rules = [rxn_energies,pop_ads_triple] | PypiClean |
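# Illustrative sketch (not part of the module above): the Rule/Plan pattern pairs a
# SQL query with actions that consume its output columns. Mirroring rxn_energies,
# the hypothetical rule below would set rxn.is_adsorption for reactions whose jobs
# appear in ads_triple. It assumes SimpleUpdate consumes the query columns the same
# way it does for rxn_energies; it is not registered in the `rules` list above.
example_is_adsorption = Rule('rxn_is_adsorption'
    ,query = """SELECT S.rxn_id
                      ,MAX(T.bare_id IS NOT NULL) AS is_adsorption
                FROM stoich AS S
                LEFT JOIN ads_triple AS T ON S.job_id IN (T.bare_id,T.complex_id)
                GROUP BY S.rxn_id"""
    ,plan  = Plan([SimpleUpdate(rxn,['is_adsorption'])]))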
/Neveregiveup-2.0.3.tar.gz/Neveregiveup-2.0.3/wargame/knight.py | from __future__ import print_function
from abstractgameunit import AbstractGameUnit
from gameutils import print_bold
class Knight(AbstractGameUnit):
"""Class that represents the game character 'Knight'
The player instance in the game is a Knight instance. Other Knight
instances are considered as 'friends' of the player and is
indicated by the attribute `self.unit_type` .
"""
def __init__(self, name='Sir Foo'):
super().__init__(name=name)
self.max_hp = 40
self.health_meter = self.max_hp
self.unit_type = 'friend'
def info(self):
"""Print basic information about this character.
Overrides AbstractGameUnit.info
"""
print("I am a Knight!")
def acquire_hut(self, hut):
"""'Fight' the combat (command line) to acquire the hut
:arg Hut hut: The hut that needs to be acquired.
.. todo:: Refactor this method as an exercise
Example: Can you use self.enemy instead of calling
hut.occupant every time?
"""
print_bold("Entering hut %d..." % hut.number, end=' ')
is_enemy = (isinstance(hut.occupant, AbstractGameUnit) and
hut.occupant.unit_type == 'enemy')
continue_attack = 'y'
# Code block that tells what to do when you see, an enemy or a friend
# or no one in the hut.
# TODO: Refactor this.
if is_enemy:
print_bold("Enemy sighted!")
self.show_health(bold=True, end=' ')
hut.occupant.show_health(bold=True, end=' ')
while continue_attack:
continue_attack = input("\n...continue attack? (y/n): ")
if continue_attack == 'n':
self.run_away()
break
self.attack(hut.occupant)
if hut.occupant.health_meter <= 0:
print("")
hut.acquire(self)
break
if self.health_meter <= 0:
print("")
break
else:
if hut.get_occupant_type() == 'unoccupied':
print_bold("Hut is unoccupied")
else:
print_bold("Friend sighted!")
hut.acquire(self)
self.heal()
def run_away(self):
"""Abandon the combat and run away from the hut
If the player is losing the combat, there is an option
to leave the hut. A strategy to rejuvenate and restart the combat
for a better chance of winning.
..seealso :: :py:meth:`self.acquire_hut`
"""
print_bold("RUNNING AWAY...")
self.enemy = None | PypiClean |
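# Illustrative sketch (not part of wargame/knight.py above): minimal usage of the
# Knight class. The import path is an assumption based on the package layout, and
# AbstractGameUnit is assumed to provide health_meter/show_health()/attack() as
# used in acquire_hut(); a real game would pass a Hut instance to acquire_hut().
from wargame.knight import Knight

knight = Knight(name='Sir Bar')
knight.info()                 # prints "I am a Knight!"
print(knight.health_meter)    # 40, i.e. knight.max_hp
# knight.acquire_hut(hut)     # 'hut' would be a Hut instance from the game's hut module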
/Exceptable-0.1.0.tar.gz/Exceptable-0.1.0/exceptable/base.py | import re
def __factory__(regex):
class Exceptable(object):
def __init__(self, wraps, mappings):
"""
* wraps - is the base exception type to wrap.
* mappings - the dict of strings to mappings that represent the Exceptable chain.
Strings will be searched for in the base exception string using the regex
^[a-zA-Z0-9]{1,}::[\w]{0,}$
"""
self.wraps = wraps
self.mappings = mappings
self.regex = re.compile(regex)
self.regex_base = regex
# We also need to extract the exceptions from the database, for
# this particular set of exceptions.
# Exceptions are mapped via the exceptable.register function in the
# database, and should be queriable and used easily
def __call__(self, func):
def decorates(*args,**kwargs):
try:
return func(*args,**kwargs)
            except self.wraps as e:
msg = str(e)
if self.regex is not None:
g = self.regex.match(msg)
if g:
# We have a group match. Woot.
if g.group('type') in self.mappings.keys():
curr = g.group('type')
msg = g.group('msg')
raise self.mappings[curr](msg)
else:
# We don't have a match. Re-raise
raise e
else:
raise e # Why is this even happening?
return decorates
def add(self, mappings):
for key, val in mappings.items():
# Allows for easy overrides
self.mappings[key] = val
def remove(self, key):
if key in self.mappings:
del self.mappings[key]
def exception(self, key):
if key in self.mappings:
return self.mappings.get(key)
def mapped(self):
pass
def register(self, key, exception):
self.mappings[key] = exception
return Exceptable
r = __factory__('^(?P<type>[a-zA-Z]{1,})::(?P<msg>[a-zA-Z\. ]{0,})$')
class Except(r):
"""Except
User-defined exception trapping. Traps all DB-level exceptions in the
format of Except::Message
This type can be extended using the .add mechanism.
"""
pass
"""
Use via:
excepts = Except(ProgrammingError, {"PermissionError":PermissionError})
@excepts
def someFunc():
db_operation()
""" | PypiClean |
/NERDA_Con-0.0-py3-none-any.whl/NERDA_Con/precooked.py | from NERDA_Con.datasets import get_dane_data, get_conll_data
from NERDA_Con.models import NERDA
import os
import urllib
from pathlib import Path
from progressbar import ProgressBar
pbar = None
# helper function to show progressbar
def show_progress(block_num, block_size, total_size):
global pbar
if pbar is None:
pbar = ProgressBar(maxval=total_size)
downloaded = block_num * block_size
pbar.start()
if downloaded < total_size:
pbar.update(downloaded)
else:
pbar.finish()
pbar = None
class Precooked(NERDA):
"""Precooked NERDA Model
NERDA model specification that has been precooked/pretrained
and is available for download.
Inherits from [NERDA.models.NERDA][].
"""
def __init__(self, **kwargs) -> None:
"""Initialize Precooked NERDA Model
Args:
kwargs: all arguments for NERDA Model.
"""
super().__init__(**kwargs)
def download_network(self, dir = None) -> None:
"""Download Precooked Network from Web
Args:
dir (str, optional): Directory where the model file
will be saved. Defaults to None, in which case
the model will be saved in a folder '.nerda' in
your home directory.
Returns:
str: Message saying if the download was successfull.
Model is downloaded as a side-effect.
"""
model_name = type(self).__name__
# url for public S3 bucket with NERDA models.
url_s3 = 'https://nerda.s3-eu-west-1.amazonaws.com'
url_model = f'{url_s3}/{model_name}.bin'
if dir is None:
dir = os.path.join(str(Path.home()), '.nerda')
if not os.path.exists(dir):
os.mkdir(dir)
file_path = os.path.join(dir, f'{model_name}.bin')
print(
"""
Please make sure, that you're running the latest version of 'NERDA'
otherwise the model is not guaranteed to work.
"""
)
print(f'Downloading {url_model} to {file_path}')
urllib.request.urlretrieve(url_model, file_path, show_progress)
return "Network downloaded successfully. Load network with 'load_network'."
def load_network(self, file_path: str = None) -> None:
"""Load Pretrained Network
Loads pretrained network from file.
Args:
file_path (str, optional): Path to model file. Defaults to None,
in which case, the function points to the '.nerda' folder
the home directory.
"""
model_name = type(self).__name__
if file_path is None:
file_path = os.path.join(str(Path.home()), '.nerda', f'{model_name}.bin')
assert os.path.exists(file_path), "File does not exist! You can download network with download_network()"
print(
"""
Model loaded. Please make sure, that you're running the latest version
of 'NERDA' otherwise the model is not guaranteed to work.
"""
)
self.load_network_from_file(file_path)
class DA_BERT_ML(Precooked):
"""NERDA [Multilingual BERT](https://huggingface.co/bert-base-multilingual-uncased)
    for Danish, finetuned on [DaNE data set](https://github.com/alexandrainst/danlp/blob/master/docs/docs/datasets.md#dane).
Inherits from [NERDA.precooked.Precooked][].
Examples:
    >>> from NERDA.precooked import DA_BERT_ML
>>> model = DA_BERT_ML()
>>> model.download_network()
>>> model.load_network()
>>> text = 'Jens Hansen har en bondegård'
>>> model.predict_text(text)
([['Jens', 'Hansen', 'har', 'en', 'bondegård']], [['B-PER', 'I-PER', 'O', 'O', 'O']])
"""
def __init__(self, device: str = None) -> None:
"""Initialize model"""
super().__init__(transformer = 'bert-base-multilingual-uncased',
device = device,
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside = 'O',
max_len = 128,
dropout = 0.1,
hyperparameters = {'epochs' : 4,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 0.0001},
tokenizer_parameters = {'do_lower_case' : True})
class DA_DISTILBERT_ML(Precooked):
"""NERDA [Multilingual BERT](https://huggingface.co/distilbert-base-multilingual-cased)
    for Danish, finetuned on [DaNE data set](https://github.com/alexandrainst/danlp/blob/master/docs/docs/datasets.md#dane).
Inherits from [NERDA.precooked.Precooked][].
Examples:
    >>> from NERDA.precooked import DA_DISTILBERT_ML
>>> model = DA_DISTILBERT_ML()
>>> model.download_network()
>>> model.load_network()
>>> text = 'Jens Hansen har en bondegård'
>>> model.predict_text(text)
([['Jens', 'Hansen', 'har', 'en', 'bondegård']], [['B-PER', 'I-PER', 'O', 'O', 'O']])
"""
def __init__(self, device: str = None) -> None:
"""Initialize model"""
super().__init__(transformer = 'distilbert-base-multilingual-cased',
device = device,
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside = 'O',
max_len = 128,
dropout = 0.1,
hyperparameters = {'epochs' : 4,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 0.0001},
tokenizer_parameters = {'do_lower_case' : False})
class DA_ELECTRA_DA(Precooked):
"""NERDA [Danish ELECTRA](https://huggingface.co/Maltehb/-l-ctra-danish-electra-small-uncased)
for Danish finetuned on [DaNE data set](https://github.com/alexandrainst/danlp/blob/master/docs/docs/datasets.md#dane).
Inherits from [NERDA.precooked.Precooked][].
Examples:
    >>> from NERDA.precooked import DA_ELECTRA_DA
>>> model = DA_ELECTRA_DA()
>>> model.download_network()
>>> model.load_network()
>>> text = 'Jens Hansen har en bondegård'
>>> model.predict_text(text)
([['Jens', 'Hansen', 'har', 'en', 'bondegård']], [['B-PER', 'I-PER', 'O', 'O', 'O']])
"""
def __init__(self, device: str = None) -> None:
"""Initialize model"""
super().__init__(transformer = 'Maltehb/-l-ctra-danish-electra-small-uncased',
device = device,
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside = 'O',
max_len = 128,
dropout = 0.1,
hyperparameters = {'epochs' : 5,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 0.0001},
tokenizer_parameters = {'do_lower_case' : True})
class EN_ELECTRA_EN(Precooked):
"""NERDA [English ELECTRA](https://huggingface.co/google/electra-small-discriminator)
for English finetuned on [CoNLL-2003 data set](https://www.clips.uantwerpen.be/conll2003/ner/).
Inherits from [NERDA.precooked.Precooked][].
Examples:
    >>> from NERDA.precooked import EN_ELECTRA_EN
>>> model = EN_ELECTRA_EN()
>>> model.download_network()
>>> model.load_network()
>>> text = 'Old MacDonald had a farm'
>>> model.predict_text(text)
([['Old', 'MacDonald', 'had', 'a', 'farm']], [['B-PER', 'I-PER', 'O', 'O', 'O']])
"""
def __init__(self, device: str = None) -> None:
"""Initialize model"""
super().__init__(transformer = 'google/electra-small-discriminator',
device = device,
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside = 'O',
max_len = 128,
dropout = 0.1,
hyperparameters = {'epochs' : 4,
'warmup_steps' : 250,
'train_batch_size': 13,
'learning_rate': 8e-05},
tokenizer_parameters = {'do_lower_case' : True})
class EN_BERT_ML(Precooked):
"""NERDA [Multilingual BERT](https://huggingface.co/bert-base-multilingual-uncased)
for English finetuned on [CoNLL-2003 data set](https://www.clips.uantwerpen.be/conll2003/ner/).
Inherits from [NERDA.precooked.Precooked][].
Examples:
    >>> from NERDA.precooked import EN_BERT_ML
>>> model = EN_BERT_ML()
>>> model.download_network()
>>> model.load_network()
>>> text = 'Old MacDonald had a farm'
>>> model.predict_text(text)
([['Old', 'MacDonald', 'had', 'a', 'farm']], [['B-PER', 'I-PER', 'O', 'O', 'O']])
"""
def __init__(self, device: str = None) -> None:
"""Initialize model"""
super().__init__(transformer = 'bert-base-multilingual-uncased',
device = device,
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
],
tag_outside = 'O',
max_len = 128,
dropout = 0.1,
hyperparameters = {'epochs' : 4,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 0.0001},
tokenizer_parameters = {'do_lower_case' : True}) | PypiClean |
/BucketCache-0.12.1.tar.gz/BucketCache-0.12.1/bucketcache/utilities.py | from __future__ import absolute_import, division, print_function
import inspect
import json
import sys
import weakref
from collections import namedtuple
from copy import copy
from functools import partial, wraps
from decorator import decorator as decorator
from .compat.contextlib import suppress
from .exceptions import KeyInvalidError
from .log import logger
__all__ = ()
FullArgSpec = namedtuple(
'FullArgSpec',
['args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
'annotations'])
NormalizedArgs = namedtuple(
'NormalizedArgs',
['varargs', 'normargs', 'callargs'])
CachedCallInfo = namedtuple(
'CachedCallInfo',
['varargs', 'callargs', 'return_value', 'expiration_date'])
PrunedFilesInfo = namedtuple('PrunedFilesInfo', ['size', 'num'])
def fullargspec_from_argspec(argspec):
return FullArgSpec(
*argspec, kwonlyargs=[], kwonlydefaults=None, annotations={})
class DecoratorFactory(object):
"""Produce decorator to wrap a function with a bucket.
The decorator function is returned because using a class breaks
help(instance). See http://stackoverflow.com/a/25973438/2093785
"""
def __init__(self, bucket, method=False, nocache=None, callback=None,
ignore=None):
self.bucket = bucket
self.method = method
self.nocache = nocache
self.callback = callback
self.fref = None
self.property = False
if ignore is None:
ignore = ()
self.ignore = ignore
def decorate(self, f):
if isinstance(f, property):
f = f.fget
self.property = True
# method=True can be excluded when decorating a property because
# it's detectable. Set it now.
self.method = True
self.fref = weakref.ref(f)
# Try and use getargspec() first so that cache will work on source
# compatible with Python 2 and 3.
try:
argspec = inspect.getargspec(f)
argspec = fullargspec_from_argspec(argspec)
all_args = set(argspec.args)
except ValueError:
argspec = inspect.getfullargspec(f)
all_args = set(argspec.args)
all_args.update(argspec.kwonlyargs)
if self.nocache:
if self.nocache not in all_args:
                raise TypeError("nocache decorator argument '{}' "
                                "missing from argspec.".format(self.nocache))
test_set = set(self.ignore)
# *args and **kwargs can be ignored too
if argspec.varargs in self.ignore:
test_set -= set([argspec.varargs])
if argspec.varkw in self.ignore:
test_set -= set([argspec.varkw])
raise_invalid_keys(all_args, test_set,
message='parameter{s} cannot be ignored if not '
'present in argspec: {keys}')
fsig = (f.__name__, argspec._asdict())
def load_or_call(f, key_hash, args, kwargs, varargs, callargs):
"""Load function result from cache, or call function and cache
result.
args and kwargs are used to call original function.
varargs and callargs are used to call callback.
"""
skip_cache = False
if self.nocache:
skip_cache = callargs[self.nocache]
def call_and_cache():
logger.info('Calling function {}', f)
res = f(*args, **kwargs)
obj = self.bucket._update_or_make_obj_with_hash(key_hash, res)
self.bucket._set_obj_with_hash(key_hash, obj)
return res
called = False
if skip_cache:
result = call_and_cache()
called = True
else:
try:
obj = self.bucket._get_obj_from_hash(key_hash)
result = obj.value
except KeyInvalidError:
result = call_and_cache()
called = True
else:
logger.info('Function call loaded from cache: {}', f)
if self.callback:
callinfo = CachedCallInfo(varargs, callargs, result,
obj.expiration_date)
if self.method:
instance = callargs[argspec.args[0]]
self.callback(instance, callinfo)
else:
self.callback(callinfo)
return result, called
def wrapper(f, *args, **kwargs):
normalized_args = normalize_args(f, *args, **kwargs)
varargs, normargs, callargs = normalized_args
sig_normargs = normargs.copy()
sig_varargs = copy(varargs)
# Delete nocache parameter from call arg used for signature.
if self.nocache:
del sig_normargs[self.nocache]
for arg in self.ignore:
if arg == argspec.varargs:
sig_varargs = ()
elif arg == argspec.varkw:
for kwarg in callargs[argspec.varkw]:
del sig_normargs[kwarg]
else:
del sig_normargs[arg]
if self.method:
instance = args[0]
# Delete instance parameter from call arg used for signature.
del sig_normargs[argspec.args[0]]
sig_instance = get_instance_signature(instance)
signature = (sig_instance, fsig, sig_varargs, sig_normargs)
else:
signature = (fsig, sig_varargs, sig_normargs)
# Make key_hash before function call, and raise error
# if state changes (hash is different) afterwards.
key_hash = self.bucket._hash_for_key(signature)
ret, called = load_or_call(f, key_hash, args, kwargs, varargs, callargs)
if called:
post_key_hash = self.bucket._hash_for_key(signature)
if key_hash != post_key_hash:
optional = ''
if self.method:
optional = ' or instance state'
raise ValueError(
"modification of input parameters{} by function"
" '{}' cannot be cached.".format(optional, f.__name__))
return ret
new_function = decorator(wrapper, f)
new_function.callback = self.add_callback
if self.property:
new_function = property(new_function)
return new_function
def add_callback(self, f):
"""Magic method assigned to f.callback that allows a callback to be
defined as follows:
@bucket
def fun(...):
...
@fun.callback
def fun():
...
In the event that a cached result is used, the callback is fired.
"""
self.callback = f
return self.decorate(self.fref())
def get_instance_signature(instance):
"""Get state of instance for cache signature (as part of key).
    Attempts to get the state are made in this order:
- instance._getsate_bucketcache_()
- instance.__getstate__()
- instance.__dict__
"""
with suppress(AttributeError):
return instance._getsate_bucketcache_()
with suppress(AttributeError):
return instance.__getstate__()
return instance.__dict__
def normalize_args(f, *args, **kwargs):
"""Normalize call arguments into keyword form and varargs.
args can only be non-empty if there is *args in the argument specification.
"""
callargs = inspect.getcallargs(f, *args, **kwargs)
original_callargs = callargs.copy()
try:
argspec = inspect.getargspec(f)
except ValueError:
argspec = inspect.getfullargspec(f)
else:
argspec = fullargspec_from_argspec(argspec)
if hasattr(argspec, 'varkw'):
if argspec.varkw:
kwargs = callargs.pop(argspec.varkw, {})
callargs.update(kwargs)
if argspec.varargs:
varargs = callargs.pop(argspec.varargs, ())
else:
varargs = ()
# now callargs is all keywords
return NormalizedArgs(varargs=varargs,
normargs=callargs,
callargs=original_callargs)
def raise_invalid_keys(valid_keys, passed_keys, message=None):
if message is None:
message = 'Invalid keyword argument{s}: {keys}'
if not passed_keys <= valid_keys:
invalid_keys = passed_keys - valid_keys
raise_keys(invalid_keys, message=message)
def raise_keys(keys, message):
invalid_str = ', '.join(keys)
s = 's' if len(keys) > 1 else ''
raise TypeError(message.format(s=s, keys=invalid_str)) | PypiClean |
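# Illustrative sketch (not part of bucketcache/utilities.py above): normalize_args
# flattens a call into keyword form plus any leftover *args, which is what the cache
# key is built from. The import path is assumed from the package layout in the file
# header, `greet` is a hypothetical function, and inspect.getargspec must still be
# available (older Python versions).
from bucketcache.utilities import normalize_args

def greet(greeting, name='world', *extras, **options):
    return '{} {}'.format(greeting, name)

varargs, normargs, callargs = normalize_args(greet, 'hello', name='there', shout=True)
# varargs  -> ()  (nothing was consumed by *extras)
# normargs -> {'greeting': 'hello', 'name': 'there', 'shout': True}
#             (**options flattened into plain keywords)
# callargs -> the original inspect.getcallargs() mapping, with 'options' kept nested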
/Inti-0.0.0a0.tar.gz/Inti-0.0.0a0/inti/MA/MAESLoader.py | import os
import logging
from elasticsearch import Elasticsearch, helpers
from inti.MA.MAExecutor import MAExecutor
from inti.MA.MAMetadata import MAColumnNames
class MAESLoader:
def __init__(self, file_name, index_name, field_name, col_names, sep='\t', buffer_size=1024 * 1024,
db_ip='127.0.0.1', db_port=9200, timeout=120, log_file='maesbase.log', info_level=logging.DEBUG):
'''
        Class to load a field from a Microsoft Academic file into an Elastic Search database.
        Parameters:
            file_name: string
                name of the file to load, ex: .../mag/Papers.txt
            index_name: string
                database name (index) on Elastic Search
            field_name: string
                Name of the field for the index, ex: PaperTitle
            col_names: dict
                names of the columns for the given file.
                Object provided by Inti.MA.Metadata.MAColumnNames
            sep: string
                separator for the *.txt files, the default one is '\t'
            buffer_size: int
                parameter that specifies the size of the buffer for every process,
                while chunks of the text file are held in RAM before being inserted into Elastic Search
db_ip: string
database ip for connection to Elastic Search
db_port: int
database port for connection to Elastic Search
timeout: int
timeout for persistent connection to Elastic Search
log_file:string
file log name
info_level: logging flag
the default at the moment is DEBUG
'''
self.file_name = file_name
self.buffer_size = buffer_size
self.info_level = info_level
self.log_file = log_file
self.logger = logging.getLogger(__name__)
self.set_info_level(info_level)
self.sep = sep
self.db_ip = db_ip
self.db_port = db_port
self.timeout = timeout
self.col_names = col_names
self.field_name = field_name
self.index_name = index_name
def process(self, line):
'''
Process the line, adding the metadata to create a dictionary
Parameters:
line:string
line from the MA file with the data values.
Returns:
register:dict
dictionary with the information on the metadata and values.
'''
register = {}
if isinstance(line, type(bytes())):
line = line.decode('utf-8')
fields = line.split(self.sep)
if len(fields) == len(self.col_names):
for index in range(len(self.col_names)):
col_name = self.col_names[index]
register[col_name] = fields[index]
return register
else:
# TODO:Error here
pass
def set_info_level(self, info_level):
'''
Information level for debug or verbosity of the application (https://docs.python.org/3.1/library/logging.html)
'''
if info_level != logging.DEBUG:
logging.basicConfig(filename=self.log_file, level=info_level)
self.info_level = info_level
def process_wrapper(self, file_name, index_name, chunkStart, chunkSize):
'''
Allows to insert the content of the file in the Elastic Search
Parameters:
file_name:string
name of the file, ex: ...mag/Papers.txt
index_name:string
name of the database(index) on Elastic Search ex: MAG
chunkStart:int
starting point to read the file
chunkSize:int
size of the chunk from the starting point
'''
        es = Elasticsearch(
            [{'host': self.db_ip, 'port': self.db_port}],
            timeout=self.timeout)
with open(self.file_name, 'rb') as f:
f.seek(chunkStart)
lines = f.read(chunkSize).decode('utf-8').split('\r\n')
processed_lines = []
for line in lines:
line = self.process(line)
if line is not None:
entry = {"_index": self.index_name,
"_id": str(line['PaperId']),
"_source": {self.field_name: line[self.field_name]}}
processed_lines.append(entry)
try:
helpers.bulk(
es,
processed_lines,
refresh=True,
request_timeout=self.timeout)
except Exception as e:
# This can happen if the server is restarted or the connection
                # becomes unavailable
print(str(e))
def chunkify(self, file_name):
'''
        Splits the file into chunks according to the buffer_size provided in the constructor
        Parameters:
            file_name: string
                name of the file to be split, ex: ../mag/Papers.txt
            Returns:
                chunkStart: int
                    starting point to read the file
                chunkSize: int
                    size of the chunk from the starting point, extended to the next end of line
                    so that no line is split across two chunks.
'''
fileEnd = os.path.getsize(self.file_name)
with open(self.file_name, 'rb') as f:
chunkEnd = f.tell()
while True:
chunkStart = chunkEnd
f.seek(self.buffer_size, 1)
f.readline()
chunkEnd = f.tell()
yield chunkStart, chunkEnd - chunkStart
if chunkEnd > fileEnd:
break
def run(self, max_threads=None):
'''
Calls the executor to run in parallel.
'''
MAExecutor(
self,
self.field_name,
self.index_name,
max_threads=max_threads)
def run(mag_dir, col_name, index_name, field_name, sep='\t', buffer_size=1024 * 1024,
db_ip='127.0.0.1', db_port=9200, timeout=120, max_threads=None):
'''
    Builds an MAESLoader for the given MAG table file and runs the load in parallel.
'''
mag_file = mag_dir + '/{}.txt'.format(col_name)
col_names = MAColumnNames["mag"][col_name]
instance = MAESLoader(
mag_file,
index_name,
field_name,
col_names,
sep,
buffer_size,
db_ip,
db_port,
timeout)
instance.run(max_threads=max_threads) | PypiClean |
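# Illustrative sketch (separate from MAESLoader above): the chunkify pattern seeks
# ahead by buffer_size and then reads to the next newline, so every chunk ends on a
# complete line and no record is split between workers. 'some_file.txt' is a
# hypothetical input path.
import os

def chunkify_example(file_name, buffer_size=1024 * 1024):
    file_end = os.path.getsize(file_name)
    with open(file_name, 'rb') as f:
        chunk_end = f.tell()
        while True:
            chunk_start = chunk_end
            f.seek(buffer_size, 1)   # jump ahead by the buffer size...
            f.readline()             # ...then finish the current line
            chunk_end = f.tell()
            yield chunk_start, chunk_end - chunk_start
            if chunk_end > file_end:
                break

# for start, size in chunkify_example('some_file.txt'):
#     a worker would seek to `start` and read `size` bytes, as process_wrapper does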
/Auto-Research-1.0.tar.gz/Auto-Research-1.0/src/arxiv_public_data/s3_bulk_download.py | import os
import re
import gzip
import json
import glob
import shlex
import shutil
import tarfile
import boto3
import hashlib
import requests
import subprocess
from functools import partial
from multiprocessing import Pool
from collections import defaultdict
import xml.etree.ElementTree as ET
from arxiv_public_data import fulltext
from arxiv_public_data.config import DIR_FULLTEXT, DIR_PDFTARS, LOGGER
logger = LOGGER.getChild('s3')
CHUNK_SIZE = 2**20 # 1MB
BUCKET_NAME = 'arxiv'
S3_PDF_MANIFEST = 'pdf/arXiv_pdf_manifest.xml'
S3_TEX_MANIFEST = 'src/arXiv_src_manifest.xml'
HEADERS = {'x-amz-request-payer': 'requester'}
s3 = boto3.client('s3', region_name='us-east-1')
def download_file(filename, outfile, chunk_size=CHUNK_SIZE, redownload=False,
dryrun=False):
"""
Downloads filename from the ArXiv AWS S3 bucket, and returns streaming md5
sum of the content
Parameters
----------
filename : str
KEY corresponding to AWS bucket file
    outfile : str
name and path of local file in which downloaded file will be stored
(optional)
chunk_size : int
requests byte streaming size (so 500MB are not stored in memory
prior to processing)
redownload : bool
Look to see if file is already downloaded, and simply return md5sum
if it it exists, unless redownload is True
dryrun : bool
If True, only log activity
Returns
-------
md5sum : str
md5 checksum of the contents of filename
"""
if os.path.exists(outfile) and not redownload:
md5 = hashlib.md5()
md5.update(gzip.open(outfile, 'rb').read())
return md5.hexdigest()
md5 = hashlib.md5()
url = s3.generate_presigned_url(
"get_object",
Params={
"Bucket": BUCKET_NAME, "Key": filename, "RequestPayer": 'requester'
}
)
if not dryrun:
logger.info('Requesting "{}" (costs money!)'.format(filename))
request = requests.get(url, stream=True)
response_iter = request.iter_content(chunk_size=chunk_size)
logger.info("\t Writing {}".format(outfile))
with gzip.open(outfile, 'wb') as fout:
for i, chunk in enumerate(response_iter):
fout.write(chunk)
md5.update(chunk)
else:
logger.info('Requesting "{}" (free!)'.format(filename))
logger.info("\t Writing {}".format(outfile))
return md5.hexdigest()
def default_manifest_filename():
return os.path.join(DIR_PDFTARS, 'arxiv-manifest.xml.gz')
def get_manifest(filename=None, redownload=False):
"""
Get the file manifest for the ArXiv
Parameters
----------
redownload : bool
If true, forces redownload of manifest even if it exists
Returns
-------
file_information : list of dicts
each dict contains the file metadata
"""
manifest_file = filename or default_manifest_filename()
md5 = download_file(
S3_PDF_MANIFEST, manifest_file, redownload=redownload, dryrun=False
)
manifest = gzip.open(manifest_file, 'rb').read()
return parse_manifest(manifest)
def parse_manifest(manifest):
"""
Parse the XML of the ArXiv manifest file.
Parameters
----------
manifest : str
xml string from the ArXiv manifest file
Returns
-------
file_information : list of dicts
One dict for each file, containing the filename, size, md5sum,
and other metadata
"""
root = ET.fromstring(manifest)
return [
{c.tag: f.find(c.tag).text for c in f.getchildren()}
for f in root.findall('file')
]
def _tar_to_filename(filename):
return os.path.join(DIR_PDFTARS, os.path.basename(filename)) + '.gz'
def download_check_tarfile(filename, md5_expected, dryrun=False, redownload=False):
""" Download filename, check its md5sum, and form the output path """
outname = _tar_to_filename(filename)
md5_downloaded = download_file(
filename, outname, dryrun=dryrun, redownload=redownload
)
if not dryrun:
if md5_expected != md5_downloaded:
msg = "MD5 '{}' does not match expected '{}' for file '{}'".format(
md5_downloaded, md5_expected, filename
)
raise AssertionError(msg)
return outname
def download_check_tarfiles(list_of_fileinfo, dryrun=False):
"""
Download tar files from the ArXiv manifest and check that their MD5sums
match
Parameters
----------
list_of_fileinfo : list
Some elements of results of get_manifest
(optional)
dryrun : bool
If True, only log activity
"""
for fileinfo in list_of_fileinfo:
download_check_tarfile(fileinfo['filename'], fileinfo['md5sum'], dryrun=dryrun)
def _call(cmd, dryrun=False, debug=False):
""" Spawn a subprocess and execute the string in cmd """
if dryrun:
logger.info(cmd)
return 0
else:
return subprocess.check_call(
shlex.split(cmd), stderr=None if debug else open(os.devnull, 'w')
)
def _make_pathname(filename):
"""
Make filename path for text document, sorted like on arXiv servers.
Parameters
----------
filename : str
string filename of arXiv article
(optional)
Returns
-------
pathname : str
pathname in which to store the article following
        * Old ArXiv IDs: e.g. hep-ph0001001.txt returns
            DIR_FULLTEXT/hep-ph/0001/hep-ph0001001.txt
        * New ArXiv IDs: e.g. 1501.13851.txt returns
            DIR_FULLTEXT/arxiv/1501/1501.13851.txt
"""
basename = os.path.basename(filename)
fname = os.path.splitext(basename)[0]
if '.' in fname: # new style ArXiv ID
yearmonth = fname.split('.')[0]
return os.path.join(DIR_FULLTEXT, 'arxiv', yearmonth, basename)
# old style ArXiv ID
cat, aid = re.split(r'(\d+)', fname)[:2]
yearmonth = aid[:4]
return os.path.join(DIR_FULLTEXT, cat, yearmonth, basename)
def process_tarfile_inner(filename, pdfnames=None, processes=1, dryrun=False,
timelimit=fulltext.TIMELIMIT):
outname = _tar_to_filename(filename)
if not os.path.exists(outname):
msg = 'Tarfile from manifest not found {}, skipping...'.format(outname)
logger.error(msg)
return
# unpack tar file
if pdfnames:
namelist = ' '.join(pdfnames)
cmd = 'tar --one-top-level -C {} -xf {} {}'
cmd = cmd.format(DIR_PDFTARS, outname, namelist)
else:
cmd = 'tar --one-top-level -C {} -xf {}'.format(DIR_PDFTARS, outname)
_call(cmd, dryrun)
basename = os.path.splitext(os.path.basename(filename))[0]
pdfdir = os.path.join(DIR_PDFTARS, basename, basename.split('_')[2])
# Run fulltext to convert pdfs in tardir into *.txt
converts = fulltext.convert_directory_parallel(
pdfdir, processes=processes, timelimit=timelimit
)
# move txt into final file structure
txtfiles = glob.glob('{}/*.txt'.format(pdfdir))
for tf in txtfiles:
mvfn = _make_pathname(tf)
dirname = os.path.dirname(mvfn)
if not os.path.exists(dirname):
_call('mkdir -p {}'.format(dirname), dryrun)
if not dryrun:
shutil.move(tf, mvfn)
# clean up pdfs
_call('rm -rf {}'.format(os.path.join(DIR_PDFTARS, basename)), dryrun)
def process_tarfile(fileinfo, pdfnames=None, dryrun=False, debug=False, processes=1):
"""
Download and process one of the tar files from the ArXiv manifest.
Download, unpack, and spawn the Docker image for converting pdf2text.
It will only try to download the file if it does not already exist.
    The tar file will be stored in DIR_PDFTARS/<fileinfo[filename]>.gz and the
    resulting arXiv articles will be stored in the subdirectory
    DIR_FULLTEXT/arxiv/<yearmonth>/<aid>.txt for new style arXiv IDs and
    DIR_FULLTEXT/<category>/<yearmonth>/<aid>.txt for old style arXiv IDs.
Parameters
----------
fileinfo : dict
dictionary of file information from parse_manifest
(optional)
dryrun : bool
If True, only log activity
debug : bool
Silence stderr of Docker _call if debug is False
"""
filename = fileinfo['filename']
md5sum = fileinfo['md5sum']
if check_if_any_processed(fileinfo):
logger.info('Tar file appears processed, skipping {}...'.format(filename))
return
logger.info('Processing tar "{}" ...'.format(filename))
    process_tarfile_inner(filename, pdfnames=pdfnames, processes=processes, dryrun=dryrun)
def process_manifest_files(list_of_fileinfo, processes=1, dryrun=False):
"""
    Download PDFs from the ArXiv AWS S3 bucket and convert each pdf to text.
    If files are already downloaded, it will only process them.
    Parameters
    ----------
list_of_fileinfo : list
Some elements of results of get_manifest
(optional)
processes : int
        number of parallel workers to spawn (roughly as many CPUs as you have)
dryrun : bool
If True, only log activity
"""
for fileinfo in list_of_fileinfo:
process_tarfile(fileinfo, dryrun=dryrun, processes=processes)
def check_if_any_processed(fileinfo):
"""
Spot check a tarfile to see if the pdfs have been converted to text,
given an element of the s3 manifest
"""
first = _make_pathname(fileinfo['first_item']+'.txt')
last = _make_pathname(fileinfo['last_item']+'.txt')
return os.path.exists(first) and os.path.exists(last)
def generate_tarfile_indices(manifest):
"""
Go through the manifest and for every tarfile, get a list of the PDFs
that should be contained within it. This is a separate function because
even checking the tars is rather slow.
Returns
-------
index : dictionary
keys: tarfile, values: list of pdfs
"""
index = {}
for fileinfo in manifest:
name = fileinfo['filename']
logger.info("Indexing {}...".format(name))
tarname = os.path.join(DIR_PDFTARS, os.path.basename(name))+'.gz'
files = [i for i in tarfile.open(tarname).getnames() if i.endswith('.pdf')]
index[name] = files
return index
def check_missing_txt_files(index):
"""
Use the index file from `generate_tarfile_indices` to check which pdf->txt
conversions are outstanding.
"""
missing = defaultdict(list)
for tar, pdflist in index.items():
logger.info("Checking {}...".format(tar))
for pdf in pdflist:
txt = _make_pathname(pdf).replace('.pdf', '.txt')
if not os.path.exists(txt):
missing[tar].append(pdf)
return missing
def rerun_missing(missing, processes=1):
"""
Use the output of `check_missing_txt_files` to attempt to rerun the text
files which are missing from the conversion. There are various reasons
that they can fail.
"""
sort = list(reversed(
sorted([(k, v) for k, v in missing.items()], key=lambda x: len(x[1]))
))
for tar, names in sort:
logger.info("Running {} ({} to do)...".format(tar, len(names)))
process_tarfile_inner(
tar, pdfnames=names, processes=processes,
timelimit=5 * fulltext.TIMELIMIT
)
def process_missing(manifest, processes=1):
"""
Do the full process of figuring what is missing and running them
"""
indexfile = os.path.join(DIR_PDFTARS, 'manifest-index.json')
if not os.path.exists(indexfile):
index = generate_tarfile_indices(manifest)
json.dump(index, open(indexfile, 'w'))
index = json.load(open(indexfile))
missing = check_missing_txt_files(index)
    rerun_missing(missing, processes=processes)
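# Example usage sketch (not part of the original module). The arXiv bucket is
# requester-pays, so a real run assumes configured AWS credentials and incurs
# S3 transfer charges; dryrun=True below only logs what would happen.
#
#     manifest = get_manifest()                          # download + parse the PDF manifest
#     download_check_tarfiles(manifest[:2], dryrun=True) # fetch tars, verify md5sums
#     process_manifest_files(manifest[:2], processes=4, dryrun=True)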
# /observations-0.1.4.tar.gz/observations-0.1.4/observations/r/prminwge.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def prminwge(path):
"""prminwge
Data loads lazily. Type data(prminwge) into the console.
A data.frame with 38 rows and 25 variables:
- year. 1950-1987
- avgmin. weighted avg min wge, 44 indust
- avgwage. wghted avg hrly wge, 44 indust
- kaitz. Kaitz min wage index
- avgcov. wghted avg coverage, 8 indust
- covt. economy-wide coverage of min wg
- mfgwage. avg manuf. wage
- prdef. Puerto Rican price deflator
- prepop. PR employ/popul ratio
- prepopf. PR employ/popul ratio, alter.
- prgnp. PR GNP
- prunemp. PR unemployment rate
- usgnp. US GNP
- t. time trend: 1 to 38
- post74. time trend: starts in 1974
- lprunemp. log(prunemp)
- lprgnp. log(prgnp)
- lusgnp. log(usgnp)
- lkaitz. log(kaitz)
- lprun\_1. lprunemp[\_n-1]
- lprepop. log(prepop)
- lprep\_1. lprepop[\_n-1]
- mincov. (avgmin/avgwage)\*avgcov
- lmincov. log(mincov)
- lavgmin. log(avgmin)
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `prminwge.csv`.
Returns:
Tuple of np.ndarray `x_train` with 38 rows and 25 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'prminwge.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/prminwge.csv'
maybe_download_and_extract(path, url,
save_file_name='prminwge.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
  return x_train, metadata
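# Example usage sketch (illustrative; assumes network access to the URL above
# on the first call, after which the CSV is cached in `path`):
#
#     x_train, metadata = prminwge('~/data')
#     x_train.shape            # documented above as 38 rows x 25 variables
#     metadata['columns']      # pandas Index of the variable names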
# /AaronTools-1.0b14.tar.gz/AaronTools-1.0b14/bin/grabThermo.py
import sys
import argparse
import os
from glob import glob
from warnings import warn
from numpy import isclose
from AaronTools.comp_output import CompOutput
from AaronTools.fileIO import FileReader
from AaronTools.geometry import Geometry
from AaronTools.utils.utils import glob_files
thermo_parser = argparse.ArgumentParser(
description="print thermal corrections and free energy",
formatter_class=argparse.RawTextHelpFormatter
)
thermo_parser.add_argument(
"infile",
metavar="frequency output file",
type=str,
nargs="*",
default=[sys.stdin],
help="completed QM output file with frequency info"
)
thermo_parser.add_argument(
"-o",
"--output",
type=str,
default=None,
required=False,
dest="outfile",
help="output destination \nDefault: stdout"
)
thermo_parser.add_argument(
"-if",
"--input-format",
type=str,
nargs=1,
default=None,
dest="input_format",
choices=["log", "out", "dat"],
help="file format of input - required if input is stdin"
)
thermo_parser.add_argument(
"-sp",
"--single-point",
type=str,
nargs="*",
default=[None],
required=False,
dest="sp_file",
help="file containing single-point energy"
)
thermo_parser.add_argument(
"-t",
"--temperature",
type=float,
default=None,
required=False,
dest="temp",
help="compute thermal corrections using the specified temperature (K)\n" +
"Default: value found in file or 298.15"
)
thermo_parser.add_argument(
"-w0",
"--frequency-cutoff",
type=float,
default=100.0,
required=False,
dest="w0",
help="cutoff frequency for quasi free energy corrections (1/cm)\n" +
"Default: 100 cm^-1"
)
thermo_parser.add_argument(
"-csv",
"--csv-format",
nargs="?",
required=False,
dest="csv",
default=False,
choices=["comma", "semicolon", "tab", "space"],
help="print output in CSV format with the specified delimiter"
)
thermo_parser.add_argument(
"-r",
"--recursive",
metavar="PATTERN",
type=str,
nargs="*",
default=None,
required=False,
dest="pattern",
help="search subdirectories of current directory for files matching PATTERN"
)
args = thermo_parser.parse_args()
if args.csv is None:
args.csv = "comma"
if args.csv:
if args.csv == "comma":
delim = ","
elif args.csv == "semicolon":
delim = ";"
elif args.csv == "tab":
delim = "\t"
elif args.csv == "space":
delim = " "
anharm_header = delim.join([
"E", "E+ZPE", "E+ZPE(anh)", "H(RRHO)", "G(RRHO)", "G(Quasi-RRHO)", "G(Quasi-harmonic)",
"ZPE", "ZPE(anh)", "dH(RRHO)", "dG(RRHO)", "dG(Quasi-RRHO)", "dG(Quasi-harmonic)",
"SP_File", "Thermo_File"
])
harm_header = delim.join([
"E", "E+ZPE", "H(RRHO)", "G(RRHO)", "G(Quasi-RRHO)", "G(Quasi-harmonic)",
"ZPE", "dH(RRHO)", "dG(RRHO)", "dG(Quasi-RRHO)", "dG(Quasi-harmonic)",
"SP_File", "Thermo_File"
])
header = None
output = ""
if args.pattern is None:
infiles = glob_files(args.infile, parser=thermo_parser)
else:
infiles = []
if args.infile == [sys.stdin]:
directories = [os.getcwd()]
else:
directories = []
for directory in args.infile:
directories.extend(glob(directory))
for directory in directories:
for root, dirs, files in os.walk(directory, topdown=True):
for pattern in args.pattern:
full_glob = os.path.join(root, pattern)
infiles.extend(glob(full_glob))
infiles.sort()
if args.sp_file != [None]:
if args.pattern is None:
sp_filenames = glob_files([f for f in args.sp_file])
else:
sp_filenames = []
if args.infile == [sys.stdin]:
directories = [os.getcwd()]
else:
directories = []
for directory in args.infile:
directories.extend(glob(directory))
for directory in directories:
for root, dirs, files in os.walk(directory, topdown=True):
for pattern in args.sp_file:
full_glob = os.path.join(root, pattern)
sp_filenames.extend(glob(full_glob))
sp_filenames.sort()
sp_files = [FileReader(f, just_geom=False) for f in sp_filenames]
sp_energies = [sp_file.other["energy"] for sp_file in sp_files]
else:
sp_energies = [None for f in infiles]
sp_filenames = [None for f in infiles]
while len(sp_energies) < len(infiles):
sp_energies.extend([sp_file.other["energy"] for sp_file in sp_files])
sp_filenames.extend(args.sp_file)
while len(infiles) < len(sp_filenames):
infiles.extend(args.infile)
for sp_nrg, sp_file, f in zip(sp_energies, sp_filenames, infiles):
if isinstance(f, str):
if args.input_format is not None:
infile = FileReader((f, args.input_format[0], None), just_geom=False)
else:
infile = FileReader(f, just_geom=False)
else:
if args.input_format is not None:
infile = FileReader(("from stdin", args.input_format[0], f), just_geom=False)
else:
if len(sys.argv) >= 1:
thermo_parser.print_help()
raise RuntimeError(
"when no input file is given, stdin is read and a format must be specified"
)
if "frequency" not in infile.other:
warn("no frequencies in %s - skipping" % f)
continue
freq = infile.other["frequency"]
co = CompOutput(
infile,
)
if sp_nrg is None:
nrg = co.energy
else:
nrg = sp_nrg
sp_geom = Geometry(sp_file)
freq_geom = Geometry(infile)
rmsd = sp_geom.RMSD(freq_geom)
if not isclose(rmsd, 0, atol=1e-5):
warn(
"\ngeometries in supposed single-point/thermochemistry pair appear\n" +
"to be different (rmsd = %.5f)\n" % rmsd +
"%s\n%s" % (sp_geom.name, freq_geom.name)
)
dE, dH, s = co.therm_corr(temperature=args.temp)
if freq.anharm_data:
ZPVE_anh = co.calc_zpe(anharmonic=True)
rrho_dG = co.calc_G_corr(v0=0, temperature=args.temp, method="RRHO")
qrrho_dG = co.calc_G_corr(v0=args.w0, temperature=args.temp, method="QRRHO")
qharm_dG = co.calc_G_corr(v0=args.w0, temperature=args.temp, method="QHARM")
if args.temp is None:
t = co.temperature
else:
t = args.temp
if args.csv:
nrg_str = "%.6f" % nrg
corrections = [co.ZPVE]
if freq.anharm_data:
if header != anharm_header:
output += "%s\n" % anharm_header
header = anharm_header
corrections.append(ZPVE_anh)
elif header != harm_header:
output += "%s\n" % harm_header
header = harm_header
corrections.extend([dH, rrho_dG, qrrho_dG, qharm_dG])
therm = [nrg + correction for correction in corrections]
output += delim.join(
[nrg_str] +
["%.6f" % x for x in therm] + \
["%.6f" % x for x in corrections] + \
[sp_file if sp_file is not None else f, f]
)
output += "\n"
else:
output += "electronic energy of %s = %.6f Eh\n" % (
sp_file if sp_file is not None else f,
nrg
)
output += " E+ZPE = %.6f Eh (ZPE = %.6f)\n" % (nrg + co.ZPVE, co.ZPVE)
if freq.anharm_data:
output += " E+ZPE(anh) = %.6f Eh (ZPE(anh) = %.6f)\n" % (
nrg + ZPVE_anh, ZPVE_anh
)
output += "thermochemistry from %s at %.2f K:\n" % (f, t)
output += " H(RRHO) = %.6f Eh (dH = %.6f)\n" % (nrg + dH, dH)
output += " G(RRHO) = %.6f Eh (dG = %.6f)\n" % (nrg + rrho_dG, rrho_dG)
output += " quasi treatments for entropy (w0=%.1f cm^-1):\n" % args.w0
output += " G(Quasi-RRHO) = %.6f Eh (dG = %.6f)\n" % (nrg + qrrho_dG, qrrho_dG)
output += " G(Quasi-harmonic) = %.6f Eh (dG = %.6f)\n" % (nrg + qharm_dG, qharm_dG)
output += "\n"
output = output.strip()
if not args.outfile:
print(output.strip())
else:
with open(
args.outfile,
"a"
) as f:
        f.write(output.strip())
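# Example invocation (illustrative; file names are placeholders):
#
#     python grabThermo.py freq_job.log -sp single_point.log -t 298.15 -csv comma -o thermo.csv
#
# This reads the frequency output, pairs it with the single-point energy, and
# appends a CSV-formatted summary (E, ZPE, H, G and quasi-corrections) to thermo.csv.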
# /DefDAP-0.93.5-py3-none-any.whl/defdap/base.py
import numpy as np
import networkx as nx
from defdap.quat import Quat
from defdap import plotting
from defdap.plotting import Plot, MapPlot, GrainPlot
from skimage.measure import profile_line
from defdap.utils import reportProgress
class Map(object):
"""
Base class for a map. Contains common functionality for all maps.
Attributes
----------
grainList : list of defdap.base.Grain
List of grains.
currGrainId : int
ID of last selected grain.
"""
def __init__(self):
self.xDim = None
self.yDim = None
self.grainList = None
self.currGrainId = None # ID of last selected grain
self.homogPoints = []
self.proxigramArr = None
self.neighbourNetwork = None
self.grainPlot = None
self.profilePlot = None
def __len__(self):
return len(self.grainList)
# allow array like getting of grains
def __getitem__(self, key):
# Check that grains have been detected in the map
self.checkGrainsDetected()
return self.grainList[key]
@property
def shape(self):
return self.yDim, self.xDim
def checkGrainsDetected(self, raiseExc=True):
"""Check if grains have been detected.
Parameters
----------
raiseExc : bool
            If True then an exception is raised if grains have not been
detected.
Returns
-------
bool:
True if grains detected, False otherwise.
Raises
-------
Exception
If grains not detected.
"""
if (self.grainList is None or
type(self.grainList) is not list or
len(self.grainList) < 1):
if raiseExc:
raise Exception("No grains detected.")
else:
return False
return True
def plotGrainNumbers(self, dilateBoundaries=False, ax=None, **kwargs):
"""Plot a map with grains numbered.
Parameters
----------
dilateBoundaries : bool, optional
Set to true to dilate boundaries.
ax : matplotlib.axes.Axes, optional
axis to plot on, if not provided the current active axis is used.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.plotting.MapPlot.addGrainNumbers`
Returns
-------
defdap.plotting.MapPlot
"""
plot = plotting.MapPlot(self, ax=ax)
plot.addGrainBoundaries(colour='black', dilate=dilateBoundaries)
plot.addGrainNumbers(**kwargs)
return plot
def locateGrainID(self, clickEvent=None, displaySelected=False, **kwargs):
"""Interactive plot for identifying grains.
Parameters
----------
clickEvent : optional
Click handler to use.
displaySelected : bool, optional
If true, plot slip traces for grain selected by click.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.base.Map.plotDefault`
"""
# Check that grains have been detected in the map
self.checkGrainsDetected()
# reset current selected grain and plot euler map with click handler
self.currGrainId = None
plot = self.plotDefault(makeInteractive=True, **kwargs)
if clickEvent is None:
# default click handler which highlights grain and prints id
plot.addEventHandler(
'button_press_event',
lambda e, p: self.clickGrainID(e, p, displaySelected)
)
else:
# click handler loaded in as parameter. Pass current map
# object to it.
plot.addEventHandler('button_press_event', clickEvent)
return plot
def clickGrainID(self, event, plot, displaySelected):
"""Event handler to capture clicking on a map.
Parameters
----------
event :
Click event.
plot : defdap.plotting.MapPlot
Plot to capture clicks from.
displaySelected : bool
If true, plot the selected grain alone in pop-out window.
"""
# check if click was on the map
if event.inaxes is not plot.ax:
return
# grain id of selected grain
self.currGrainId = int(self.grains[int(event.ydata), int(event.xdata)] - 1)
print("Grain ID: {}".format(self.currGrainId))
# update the grain highlights layer in the plot
plot.addGrainHighlights([self.currGrainId], alpha=self.highlightAlpha)
if displaySelected:
currGrain = self[self.currGrainId]
if self.grainPlot is None or not self.grainPlot.exists:
self.grainPlot = currGrain.plotDefault(makeInteractive=True)
else:
self.grainPlot.clear()
self.grainPlot.callingGrain = currGrain
currGrain.plotDefault(plot=self.grainPlot)
self.grainPlot.draw()
def drawLineProfile(self, **kwargs):
"""Interactive plot for drawing a line profile of data.
Parameters
----------
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.base.Map.plotDefault`
"""
plot = self.plotDefault(makeInteractive=True, **kwargs)
plot.addEventHandler('button_press_event', plot.lineSlice)
plot.addEventHandler('button_release_event', lambda e, p: plot.lineSlice(e, p,
action=self.calcLineProfile))
return plot
def calcLineProfile(self, plot, startEnd, **kwargs):
"""Calculate and plot the line profile.
Parameters
----------
plot : defdap.plotting.MapPlot
Plot to calculate the line profile for.
startEnd : array_like
Selected points (x0, y0, x1, y1).
kwargs : dict, optional
Keyword arguments passed to :func:`matplotlib.pyplot.plot`
"""
x0, y0 = startEnd[0:2]
x1, y1 = startEnd[2:4]
profile_length = np.sqrt((y1 - y0) ** 2 + (x1 - x0) ** 2)
# Extract the values along the line
zi = profile_line(
plot.imgLayers[0].get_array(),
(startEnd[1], startEnd[0]),
(startEnd[3], startEnd[2]),
mode='nearest'
)
xi = np.linspace(0, profile_length, len(zi))
if self.profilePlot is None or not self.profilePlot.exists:
self.profilePlot = Plot(makeInteractive=True)
else:
self.profilePlot.clear()
self.profilePlot.ax.plot(xi, zi, **kwargs)
self.profilePlot.ax.set_xlabel('Distance (pixels)')
self.profilePlot.ax.set_ylabel('Intensity')
self.profilePlot.draw()
def setHomogPoint(self, binSize=1, points=None, **kwargs):
"""
Interactive tool to set homologous points. Right-click on a point
then click 'save point' to append to the homologous points list.
Parameters
----------
binSize : int, optional
Binning applied to image, if applicable.
points : numpy.ndarray, optional
Array of (x,y) homologous points to set explicitly.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.base.Map.plotHomog`
"""
if points is None:
plot = self.plotHomog(makeInteractive=True, **kwargs)
            # Plot stored homog points if there are any
if len(self.homogPoints) > 0:
homogPoints = np.array(self.homogPoints) * binSize
plot.addPoints(homogPoints[:, 0], homogPoints[:, 1],
c='y', s=60)
else:
# add empty points layer to update later
plot.addPoints([None], [None], c='y', s=60)
# add empty points layer for current selected point
plot.addPoints([None], [None], c='w', s=60, marker='x')
plot.addEventHandler('button_press_event', self.clickHomog)
plot.addEventHandler('key_press_event', self.keyHomog)
plot.addButton("Save point",
lambda e, p: self.clickSaveHomog(e, p, binSize),
color="0.85", hovercolor="blue")
else:
self.homogPoints = points
def clickHomog(self, event, plot):
"""Event handler for capturing position when clicking on a map.
Parameters
----------
event :
Click event.
plot : defdap.plotting.MapPlot
Plot to monitor.
"""
# check if click was on the map
if event.inaxes is not plot.ax:
return
# right mouse click or shift + left mouse click
# shift click doesn't work in osx backend
if (event.button == 3 or
(event.button == 1 and event.key == 'shift')):
plot.addPoints([int(event.xdata)], [int(event.ydata)],
updateLayer=1)
def keyHomog(self, event, plot):
"""Event handler for moving position using keyboard after clicking on a map.
Parameters
----------
event :
Keypress event.
plot : defdap.plotting.MapPlot
Plot to monitor.
"""
keys = ['left', 'right', 'up', 'down']
key = event.key.split('+')
if key[-1] in keys:
# get the selected point
points = plot.imgLayers[plot.pointsLayerIDs[1]]
selPoint = points.get_offsets()[0]
# check if a point is selected
if selPoint[0] is not None and selPoint[1] is not None:
# print(event.key)
move = 1
if len(key) == 2 and key[0] == 'shift':
move = 10
if key[-1] == keys[0]:
selPoint[0] -= move
elif key[-1] == keys[1]:
selPoint[0] += move
elif key[-1] == keys[2]:
selPoint[1] -= move
elif key[-1] == keys[3]:
selPoint[1] += move
plot.addPoints([selPoint[0]], [selPoint[1]], updateLayer=1)
def clickSaveHomog(self, event, plot, binSize):
"""Append the selected point on the map to homogPoints.
Parameters
----------
event :
Button click event.
plot : defdap.plotting.MapPlot
Plot to monitor.
binSize : int, optional
Binning applied to image, if applicable.
"""
# get the selected point
points = plot.imgLayers[plot.pointsLayerIDs[1]]
selPoint = points.get_offsets()[0]
# Check if a point is selected
if selPoint[0] is not None and selPoint[1] is not None:
# remove selected point from plot
plot.addPoints([None], [None], updateLayer=1)
# then scale and add to homog points list
selPoint = tuple((selPoint / binSize).round().astype(int))
self.homogPoints.append(selPoint)
# update the plotted homog points
homogPoints = np.array(self.homogPoints) * binSize
plot.addPoints(homogPoints[:, 0], homogPoints[:, 1], updateLayer=0)
def updateHomogPoint(self, homogID, newPoint=None, delta=None):
"""
Update a homog point by either over writing it with a new point or
incrementing the current values.
Parameters
----------
homogID : int
ID (place in list) of point to update or -1 for all.
newPoint : tuple, optional
(x, y) coordinates of new point.
delta : tuple, optional
Increments to current point (dx, dy).
"""
if type(homogID) is not int:
raise Exception("homogID must be an integer.")
if homogID >= len(self.homogPoints):
raise Exception("homogID is out of range.")
# Update all points
if homogID < 0:
for i in range(len(self.homogPoints)):
self.updateHomogPoint(homogID=i, delta=delta)
# Update a single point
else:
# overwrite point
if newPoint is not None:
if type(newPoint) is not tuple and len(newPoint) != 2:
raise Exception("newPoint must be a 2 component tuple")
# increment current point
elif delta is not None:
if type(delta) is not tuple and len(delta) != 2:
raise Exception("delta must be a 2 component tuple")
newPoint = list(self.homogPoints[homogID])
newPoint[0] += delta[0]
newPoint[1] += delta[1]
newPoint = tuple(newPoint)
self.homogPoints[homogID] = newPoint
@reportProgress("constructing neighbour network")
def buildNeighbourNetwork(self):
"""Construct a list of neighbours
"""
# create network
nn = nx.Graph()
nn.add_nodes_from(self.grainList)
yLocs, xLocs = np.nonzero(self.boundaries)
totalPoints = len(xLocs)
for iPoint, (x, y) in enumerate(zip(xLocs, yLocs)):
# report progress
yield iPoint / totalPoints
if (x == 0 or y == 0 or x == self.grains.shape[1] - 1 or
y == self.grains.shape[0] - 1):
# exclude boundary pixels of map
continue
# use 4 nearest neighbour points as potential neighbour grains
# (this maybe needs changing considering the position of
# boundary pixels relative to the actual edges)
# use sets as they do not allow duplicate elements
# minus 1 on all as the grain image starts labeling at 1
neighbours = {
self.grains[y + 1, x] - 1,
self.grains[y - 1, x] - 1,
self.grains[y, x + 1] - 1,
self.grains[y, x - 1] - 1
}
# neighbours = set(neighbours)
# remove boundary points (-2) and points in small
# grains (-3) (Normally -1 and -2)
neighbours.discard(-2)
neighbours.discard(-3)
neighbours = tuple(neighbours)
nunNeig = len(neighbours)
if nunNeig <= 1:
continue
for i in range(nunNeig):
for j in range(i + 1, nunNeig):
# Add to network
grain = self[neighbours[i]]
neiGrain = self[neighbours[j]]
try:
# look up boundary
nn[grain][neiGrain]
except KeyError:
# neighbour relation doesn't exist so add it
nn.add_edge(grain, neiGrain)
self.neighbourNetwork = nn
def displayNeighbours(self, **kwargs):
return self.locateGrainID(
clickEvent=self.clickGrainNeighbours, **kwargs
)
def clickGrainNeighbours(self, event, plot):
"""Event handler to capture clicking and show neighbours of selected grain.
Parameters
----------
event :
Click event.
plot : defdap.plotting.MapPlot
Plot to monitor.
"""
# check if click was on the map
if event.inaxes is not plot.ax:
return
# grain id of selected grain
grainId = int(self.grains[int(event.ydata), int(event.xdata)] - 1)
if grainId < 0:
return
self.currGrainId = grainId
grain = self[grainId]
# find first and second nearest neighbours
firstNeighbours = list(self.neighbourNetwork.neighbors(grain))
highlightGrains = [grain] + firstNeighbours
secondNeighbours = []
for firstNeighbour in firstNeighbours:
trialSecondNeighbours = list(
self.neighbourNetwork.neighbors(firstNeighbour)
)
for secondNeighbour in trialSecondNeighbours:
if (secondNeighbour not in highlightGrains and
secondNeighbour not in secondNeighbours):
secondNeighbours.append(secondNeighbour)
highlightGrains.extend(secondNeighbours)
highlightGrains = [grain.grainID for grain in highlightGrains]
highlightColours = ['white']
highlightColours.extend(['yellow'] * len(firstNeighbours))
highlightColours.append('green')
# update the grain highlights layer in the plot
plot.addGrainHighlights(highlightGrains, grainColours=highlightColours)
@property
def proxigram(self):
"""Proxigram for a map.
Returns
-------
numpy.ndarray
Distance from a grain boundary at each point in map.
"""
self.calcProxigram(forceCalc=False)
return self.proxigramArr
@reportProgress("calculating proxigram")
def calcProxigram(self, numTrials=500, forceCalc=True):
"""Calculate distance from a grain boundary at each point in map.
Parameters
----------
numTrials : int, optional
number of trials.
forceCalc : bool, optional
Force calculation even is proxigramArr is populated.
"""
if self.proxigramArr is not None and not forceCalc:
return
proxBoundaries = np.copy(self.boundaries)
proxShape = proxBoundaries.shape
        # ebsd boundary arrays have extra boundary along right and
        # bottom edge. These need to be removed.
        # right edge
if np.all(proxBoundaries[:, -1] == -1):
proxBoundaries[:, -1] = proxBoundaries[:, -2]
# bottom edge
if np.all(proxBoundaries[-1, :] == -1):
proxBoundaries[-1, :] = proxBoundaries[-2, :]
# create list of positions of each boundary point
indexBoundaries = []
for index, value in np.ndenumerate(proxBoundaries):
if value == -1:
indexBoundaries.append(index)
        # add 0.5 to boundary coordinates as they are placed on the
# bottom right edge pixels of grains
indexBoundaries = np.array(indexBoundaries) + 0.5
# array of x and y coordinate of each pixel in the map
coords = np.zeros((2, proxShape[0], proxShape[1]), dtype=float)
coords[0], coords[1] = np.meshgrid(
range(proxShape[0]), range(proxShape[1]), indexing='ij'
)
# array to store trial distance from each boundary point
trialDistances = np.full((numTrials + 1, proxShape[0], proxShape[1]),
1000, dtype=float)
# loop over each boundary point (p) and calculate distance from
        # p to all points in the map. Store minimum once numTrials have
# been made and start a new batch of trials
numBoundaryPoints = len(indexBoundaries)
j = 1
for i, indexBoundary in enumerate(indexBoundaries):
trialDistances[j] = np.sqrt((coords[0] - indexBoundary[0])**2
+ (coords[1] - indexBoundary[1])**2)
if j == numTrials:
# find current minimum distances and store
trialDistances[0] = trialDistances.min(axis=0)
j = 0
# report progress
yield i / numBoundaryPoints
j += 1
# find final minimum distances to a boundary
self.proxigramArr = trialDistances.min(axis=0)
trialDistances = None
def calcGrainAv(self, mapData, grainIds=-1):
"""Calculate grain average of any DIC map data.
Parameters
----------
mapData : numpy.ndarray
Array of map data to grain average. This must be cropped!
grainIds : list, optional
grainIDs to perform operation on, set to -1 for all grains.
Returns
-------
numpy.ndarray
Array containing the grain average values.
"""
# Check that grains have been detected in the map
self.checkGrainsDetected()
if type(grainIds) is int and grainIds == -1:
grainIds = range(len(self))
grainAvData = np.zeros(len(grainIds))
for i, grainId in enumerate(grainIds):
grain = self[grainId]
grainData = grain.grainData(mapData)
grainAvData[i] = grainData.mean()
return grainAvData
def grainDataToMapData(self, grainData, grainIds=-1, bg=0):
"""Create a map array with each grain filled with the given
values.
Parameters
----------
grainData : list or numpy.ndarray
Grain values. This can be a single value per grain or RGB
values.
grainIds : list of int or int, optional
IDs of grains to plot for. Use -1 for all grains in the map.
bg : int or real, optional
Value to fill the background with.
Returns
-------
grainMap: numpy.ndarray
Array filled with grain data values
"""
# Check that grains have been detected in the map
self.checkGrainsDetected()
if type(grainIds) is int:
if grainIds == -1:
grainIds = range(len(self))
else:
grainIds = [grainIds]
grainData = np.array(grainData)
if grainData.shape[0] != len(grainIds):
raise ValueError("The length of supplied grain data does not"
"match the number of grains.")
if len(grainData.shape) == 1:
mapShape = [self.yDim, self.xDim]
elif len(grainData.shape) == 2 and grainData.shape[1] == 3:
mapShape = [self.yDim, self.xDim, 3]
else:
raise ValueError("The grain data supplied must be either a"
"single value or RGB values per grain.")
grainMap = np.full(mapShape, bg, dtype=grainData.dtype)
for grainId, grainValue in zip(grainIds, grainData):
for coord in self[grainId].coordList:
grainMap[coord[1], coord[0]] = grainValue
return grainMap
def plotGrainDataMap(
self, mapData=None, grainData=None, grainIds=-1, bg=0, **kwargs
):
"""Plot a grain map with grains coloured by given data. The data
can be provided as a list of values per grain or as a map which
a grain average will be applied.
Parameters
----------
mapData : numpy.ndarray, optional
Array of map data. This must be cropped! Either mapData or
grainData must be supplied.
grainData : list or np.array, optional
            Grain values. This can be a single value per grain or RGB
values. You must supply either mapData or grainData.
grainIds: list of int or int, optional
IDs of grains to plot for. Use -1 for all grains in the map.
bg: int or real, optional
Value to fill the background with.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.plotting.MapPlot.create`
Returns
-------
plot: defdap.plotting.MapPlot
Plot object created
"""
# Set default plot parameters then update with any input
plotParams = {}
plotParams.update(kwargs)
if grainData is None:
if mapData is None:
raise ValueError("Either 'mapData' or 'grainData' must "
"be supplied.")
else:
grainData = self.calcGrainAv(mapData, grainIds=grainIds)
grainMap = self.grainDataToMapData(grainData, grainIds=grainIds,
bg=bg)
plot = MapPlot.create(self, grainMap, **plotParams)
return plot
def plotGrainDataIPF(
self, direction, mapData=None, grainData=None, grainIds=-1,
**kwargs
):
"""
Plot IPF of grain reference (average) orientations with
points coloured by grain average values from map data.
Parameters
----------
direction : numpy.ndarray
Vector of reference direction for the IPF.
mapData : numpy.ndarray
Array of map data. This must be cropped! Either mapData or
grainData must be supplied.
grainData : list or np.array, optional
Grain values. This an be a single value per grain or RGB
values. You must supply either mapData or grainData.
grainIds: list of int or int, optional
IDs of grains to plot for. Use -1 for all grains in the map.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.quat.Quat.plotIPF`
"""
# Set default plot parameters then update with any input
plotParams = {}
plotParams.update(kwargs)
if grainData is None:
if mapData is None:
raise ValueError("Either 'mapData' or 'grainData' must "
"be supplied.")
else:
grainData = self.calcGrainAv(mapData, grainIds=grainIds)
# Check that grains have been detected in the map
self.checkGrainsDetected()
if type(grainIds) is int and grainIds == -1:
grainIds = range(len(self))
if len(grainData) != len(grainIds):
raise Exception("Must be 1 value for each grain in grainData.")
grainOri = np.empty(len(grainIds), dtype=Quat)
for i, grainId in enumerate(grainIds):
grain = self[grainId]
grainOri[i] = grain.refOri
plot = Quat.plotIPF(grainOri, direction, self.crystalSym,
c=grainData, **plotParams)
return plot
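# Illustrative usage sketch for the Map base class above (not part of the
# original module). In practice a concrete subclass (e.g. an EBSD or DIC map)
# is instantiated; assuming `my_map` is such an instance with grains detected
# and `strain` is a cropped, map-sized array:
#
#     my_map.locateGrainID()                      # interactive grain picking
#     grain_av = my_map.calcGrainAv(strain)       # per-grain averages of map data
#     my_map.plotGrainDataMap(grainData=grain_av) # colour grains by those averages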
class Grain(object):
"""
Base class for a grain.
Attributes
----------
grainID : int
ownerMap : defdap.base.Map
coordList : list of tuples
"""
def __init__(self, grainID, ownerMap):
# list of coords stored as tuples (x, y). These are coords in a
# cropped image if crop exists.
self.grainID = grainID
self.ownerMap = ownerMap
self.coordList = []
def __len__(self):
return len(self.coordList)
def __str__(self):
return f"Grain(ID={self.grainID})"
@property
def extremeCoords(self):
"""Coordinates of the bounding box for a grain.
Returns
-------
int, int, int, int
minimum x, minimum y, maximum x, maximum y.
"""
coords = np.array(self.coordList, dtype=int)
x0, y0 = coords.min(axis=0)
xmax, ymax = coords.max(axis=0)
return x0, y0, xmax, ymax
def centreCoords(self, centreType="box", grainCoords=True):
"""
Calculates the centre of the grain, either as the centre of the
bounding box or the grains centre of mass.
Parameters
----------
centreType : str, optional, {'box', 'com'}
Set how to calculate the centre. Either 'box' for centre of
bounding box or 'com' for centre of mass. Default is 'box'.
grainCoords : bool, optional
If set True the centre is returned in the grain coordinates
            otherwise in the map coordinates. Default is grain coordinates.
Returns
-------
int, int
Coordinates of centre of grain.
"""
x0, y0, xmax, ymax = self.extremeCoords
if centreType == "box":
xCentre = round((xmax + x0) / 2)
yCentre = round((ymax + y0) / 2)
elif centreType == "com":
xCentre, yCentre = np.array(self.coordList).mean(axis=0).round()
else:
raise ValueError("centreType must be box or com")
if grainCoords:
xCentre -= x0
yCentre -= y0
return int(xCentre), int(yCentre)
def grainOutline(self, bg=np.nan, fg=0):
"""Generate an array of the grain outline.
Parameters
----------
bg : int
Value for points not within grain.
fg : int
Value for points within grain.
Returns
-------
numpy.ndarray
Bounding box for grain with :obj:`~numpy.nan` outside the grain and given number within.
"""
x0, y0, xmax, ymax = self.extremeCoords
# initialise array with nans so area not in grain displays white
outline = np.full((ymax - y0 + 1, xmax - x0 + 1), bg, dtype=int)
for coord in self.coordList:
outline[coord[1] - y0, coord[0] - x0] = fg
return outline
def plotOutline(self, ax=None, plotScaleBar=False, **kwargs):
"""Plot the outline of the grain.
Parameters
----------
ax : matplotlib.axes.Axes
axis to plot on, if not provided the current active axis is used.
plotScaleBar : bool
plots the scale bar on the grain if true.
kwargs : dict
keyword arguments passed to :func:`defdap.plotting.GrainPlot.addMap`
Returns
-------
defdap.plotting.GrainPlot
"""
plot = plotting.GrainPlot(self, ax=ax)
plot.addMap(self.grainOutline(), **kwargs)
if plotScaleBar:
plot.addScaleBar()
return plot
def grainData(self, mapData):
"""Extract this grains data from the given map data.
Parameters
----------
mapData : numpy.ndarray
Array of map data. This must be cropped!
Returns
-------
numpy.ndarray
Array containing this grains values from the given map data.
"""
grainData = np.zeros(len(self), dtype=mapData.dtype)
for i, coord in enumerate(self.coordList):
grainData[i] = mapData[coord[1], coord[0]]
return grainData
def grainMapData(self, mapData=None, grainData=None, bg=np.nan):
"""Extract a single grain map from the given map data.
Parameters
----------
mapData : numpy.ndarray
Array of map data. This must be cropped! Either this or
'grainData' must be supplied and 'grainData' takes precedence.
grainData : numpy.ndarray
Array of data at each point in the grain. Either this or
'mapData' must be supplied and 'grainData' takes precedence.
bg : various, optional
Value to fill the background with. Must be same dtype as
input array.
Returns
-------
numpy.ndarray
Grain map extracted from given data.
"""
if grainData is None:
if mapData is None:
raise ValueError("Either 'mapData' or 'grainData' must "
"be supplied.")
else:
grainData = self.grainData(mapData)
x0, y0, xmax, ymax = self.extremeCoords
grainMapData = np.full((ymax - y0 + 1, xmax - x0 + 1), bg,
dtype=type(grainData[0]))
for coord, data in zip(self.coordList, grainData):
grainMapData[coord[1] - y0, coord[0] - x0] = data
return grainMapData
def grainMapDataCoarse(self, mapData=None, grainData=None,
kernelSize=2, bg=np.nan):
"""
        Create a coarsened data map of this grain only from the given map
data. Data is coarsened using a kernel at each pixel in the
grain using only data in this grain.
Parameters
----------
mapData : numpy.ndarray
Array of map data. This must be cropped! Either this or
'grainData' must be supplied and 'grainData' takes precedence.
grainData : numpy.ndarray
List of data at each point in the grain. Either this or
'mapData' must be supplied and 'grainData' takes precedence.
kernelSize : int, optional
Size of kernel as the number of pixels to dilate by i.e 1
gives a 3x3 kernel.
bg : various, optional
Value to fill the background with. Must be same dtype as
input array.
Returns
-------
numpy.ndarray
Map of this grains coarsened data.
"""
grainMapData = self.grainMapData(mapData=mapData, grainData=grainData)
grainMapDataCoarse = np.full_like(grainMapData, np.nan)
for i, j in np.ndindex(grainMapData.shape):
if np.isnan(grainMapData[i, j]):
grainMapDataCoarse[i, j] = bg
else:
coarseValue = 0
if i - kernelSize >= 0:
yLow = i - kernelSize
else:
yLow = 0
if i + kernelSize + 1 <= grainMapData.shape[0]:
yHigh = i + kernelSize + 1
else:
yHigh = grainMapData.shape[0]
if j - kernelSize >= 0:
xLow = j - kernelSize
else:
xLow = 0
if j + kernelSize + 1 <= grainMapData.shape[1]:
xHigh = j + kernelSize + 1
else:
xHigh = grainMapData.shape[1]
numPoints = 0
for k in range(yLow, yHigh):
for l in range(xLow, xHigh):
if not np.isnan(grainMapData[k, l]):
coarseValue += grainMapData[k, l]
numPoints += 1
if numPoints > 0:
grainMapDataCoarse[i, j] = coarseValue / numPoints
else:
grainMapDataCoarse[i, j] = np.nan
return grainMapDataCoarse
def plotGrainData(self, mapData=None, grainData=None, **kwargs):
"""
Plot a map of this grain from the given map data.
Parameters
----------
mapData : numpy.ndarray
Array of map data. This must be cropped! Either this or
'grainData' must be supplied and 'grainData' takes precedence.
grainData : numpy.ndarray
List of data at each point in the grain. Either this or
'mapData' must be supplied and 'grainData' takes precedence.
kwargs : dict, optional
Keyword arguments passed to :func:`defdap.plotting.GrainPlot.create`
"""
# Set default plot parameters then update with any input
plotParams = {}
plotParams.update(kwargs)
grainMapData = self.grainMapData(mapData=mapData, grainData=grainData)
plot = GrainPlot.create(self, grainMapData, **plotParams)
        return plot
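# Illustrative usage sketch for the Grain base class above (assumes `grain` is
# an element of a concrete map's grainList and `strain` is a cropped,
# map-sized array):
#
#     values = grain.grainData(strain)          # data values at this grain's pixels
#     grain.plotGrainData(mapData=strain)       # plot them within the grain's bounding box
#     x0, y0, xmax, ymax = grain.extremeCoords  # bounding box in map coordinates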
# /IdracRedfishSupportTest-0.0.7.tar.gz/IdracRedfishSupportTest-0.0.7/AssignHotSpareREDFISH.py
import argparse
import getpass
import json
import logging
import re
import requests
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description="Python script using Redfish API with OEM extension to assign either dedicated or global hot spare. Supported ways to execute the script are: passing in iDRAC username/password (arguments -u and -p), pass in only iDRAC username (argument -u only) which will prompt to the screen to enter iDRAC password, will not be returned in clear text or use X-Auth token session (argument -x). By default, the script ignores SSL cert verification. Use --ssl argument if you want to perform SSL cert verification for all Redfish calls.")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
parser.add_argument('--get-controllers', help='Get server storage controller FQDDs', action="store_true", dest="get_controllers", required=False)
parser.add_argument('--get-disks', help='Get server storage controller disk FQDDs only, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_disks", required=False)
parser.add_argument('--get-hotspare-drive', help='Get current hot spare type for each drive, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_hotspare_drive", required=False)
parser.add_argument('--get-virtualdisks', help='Get current server storage controller virtual disk(s) and virtual disk type, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_virtualdisks", required=False)
parser.add_argument('--get-virtualdisk-details', help='Get complete details for all virtual disks behind storage controller, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_virtualdisk_details", required=False)
parser.add_argument('--hotspare-type', help='Pass in the type of hot spare you want to assign. Supported values are \"dedicated\" and \"global\"', dest="hotspare_type", required=False)
parser.add_argument('--assign-disk', help='Assign global or dedicated hot spare, pass in disk FQDD, Example \"Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.6-1\"', dest="assign_disk", required=False)
parser.add_argument('--assign-virtualdisk', help='Pass in virtual disk FQDD you want to assign the dedicated hot spare disk', dest="assign_virtualdisk", required=False)
args = vars(parser.parse_args())
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
print("""\n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -u root -p calvin --get-controllers, this example will get current storage controllers.
\n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -u root --get-disks RAID.Integrated.1-1, this example will prompt to the screen asking for iDRAC username password which will not be passed in clear text. Command will then execute to get current disks for this controller FQDD.
\n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -x 72f5baabc31b3c72f88aef64dec2450c --get-virtualdisks RAID.Integrated.1-1, this example will use X-auth token session to get current virtual disks for storage controller RAID.Integrated.1-1.
\n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -x 78f5baabc31b3c72f88aef64dec2450c --assign-disk Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1 --hotspare-type dedicated --assign-virtualdisk Disk.Virtual.0:RAID.Integrated.1-1 --ssl y, this example using X-auth token session and validating SSL cert will assign disk 3 as dedicated hotspare to VD 0.
\n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -u root -p calvin --get-hotspare-drive RAID.Mezzanine.1-1, this example will return hotspare status for each drive.
    \n- AssignHotSpareREDFISH.py -ip 192.168.0.120 -u root -p calvin --assign-disk Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1 --hotspare-type global, this example will assign disk 4 as global hotspare.""")
sys.exit(0)
def check_supported_idrac_version():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService' % idrac_ip,verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService' % idrac_ip,verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code == 401:
logging.warning("\n- WARNING, status code %s returned. Incorrect iDRAC username/password or invalid privilege detected." % response.status_code)
sys.exit(0)
elif response.status_code != 200:
logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
sys.exit(0)
def get_storage_controllers():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage' % idrac_ip,verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage' % idrac_ip,verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
logging.info("\n- Server controller(s) detected -\n")
controller_list=[]
for i in data['Members']:
controller_list.append(i['@odata.id'].split("/")[-1])
print(i['@odata.id'].split("/")[-1])
def get_pdisks():
disk_used_created_vds=[]
available_disks=[]
test_valid_controller_FQDD_string(args["get_disks"])
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, args["get_disks"]),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, args["get_disks"]),verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
drive_list=[]
if data['Drives'] == []:
logging.warning("\n- WARNING, no drives detected for %s" % args["get_disks"])
sys.exit(0)
else:
logging.info("\n- Drive(s) detected for %s -\n" % args["get_disks"])
for i in data['Drives']:
drive_list.append(i['@odata.id'].split("/")[-1])
print(i['@odata.id'].split("/")[-1])
def get_pdisks_hot_spare_type():
disk_used_created_vds=[]
available_disks=[]
test_valid_controller_FQDD_string(args["get_hotspare_drive"])
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, args["get_hotspare_drive"]),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, args["get_hotspare_drive"]),verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
drive_list=[]
if data['Drives'] == []:
logging.warning("\n- WARNING, no drives detected for %s" % args["get_hotspare_drive"])
sys.exit(0)
else:
for i in data['Drives']:
drive_list.append(i['@odata.id'].split("/")[-1])
logging.info("\n- Drive FQDDs/Hot Spare Type for Controller %s -\n" % args["get_hotspare_drive"])
if args["get_hotspare_drive"]:
for i in drive_list:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Drives/%s' % (idrac_ip, i),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Drives/%s' % (idrac_ip, i),verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
for ii in data.items():
if ii[0] == "HotspareType":
print("%s: Hot Spare Type: %s" % (i,ii[1]))
def get_virtual_disks():
test_valid_controller_FQDD_string(args["get_virtualdisks"])
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s/Volumes' % (idrac_ip, args["get_virtualdisks"]),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s/Volumes' % (idrac_ip, args["get_virtualdisks"]),verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
vd_list=[]
if data['Members'] == []:
logging.warning("\n- WARNING, no volume(s) detected for %s" % args["get_virtualdisks"])
sys.exit(0)
else:
for i in data['Members']:
vd_list.append(i['@odata.id'].split("/")[-1])
logging.info("\n- Volume(s) detected for %s controller -\n" % args["get_virtualdisks"])
for ii in vd_list:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Volumes/%s' % (idrac_ip, ii),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Volumes/%s' % (idrac_ip, ii),verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
for i in data.items():
if i[0] == "VolumeType":
print("%s, Volume type: %s" % (ii, i[1]))
def get_virtual_disks_details():
test_valid_controller_FQDD_string(args["get_virtualdisk_details"])
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s/Volumes' % (idrac_ip, args["get_virtualdisk_details"]),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s/Volumes' % (idrac_ip, args["get_virtualdisk_details"]),verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
vd_list = []
if data['Members'] == []:
logging.error("\n- WARNING, no volume(s) detected for %s" % args["get_virtualdisk_details"])
sys.exit(0)
else:
logging.info("\n- Volume(s) detected for %s controller -\n" % args["get_virtualdisk_details"])
for i in data['Members']:
vd_list.append(i['@odata.id'].split("/")[-1])
print(i['@odata.id'].split("/")[-1])
for ii in vd_list:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Volumes/%s' % (idrac_ip, ii),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Volumes/%s' % (idrac_ip, ii),verify=verify_cert, auth=(idrac_username, idrac_password))
data = response.json()
logging.info("\n----- Detailed Volume information for %s -----\n" % ii)
for i in data.items():
pprint(i)
print("\n")
def assign_spare():
global job_id
method = "AssignSpare"
url = 'https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService/Actions/DellRaidService.AssignSpare' % (idrac_ip)
if args["hotspare_type"].lower() == "global":
payload = {"TargetFQDD":args["assign_disk"]}
elif args["hotspare_type"].lower() == "dedicated":
payload = {"TargetFQDD":args["assign_disk"],"VirtualDiskArray":[args["assign_virtualdisk"]]}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
data = response.json()
if response.status_code == 202:
logging.info("\n- PASS: POST command passed to set disk \"%s\" as \"%s\" hot spare" % (args["assign_disk"], args["hotspare_type"]))
try:
job_id = response.headers['Location'].split("/")[-1]
except:
logging.error("- FAIL, unable to locate job ID in JSON headers output")
sys.exit(0)
logging.info("- Job ID %s successfully created for storage method \"%s\"" % (job_id, method))
else:
logging.info("\n- FAIL, POST command failed to set hotspare")
data = response.json()
logging.info("\n- POST command failure results:\n %s" % data)
sys.exit(0)
def loop_job_status():
start_time = datetime.now()
while True:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
current_time = (datetime.now()-start_time)
if response.status_code != 200:
logging.error("\n- FAIL, GET command failed to check job status, return code is %s" % statusCode)
logging.error("Extended Info Message: {0}".format(req.json()))
sys.exit(0)
data = response.json()
if str(current_time)[0:7] >= "2:00:00":
logging.error("\n- FAIL: Timeout of 2 hours has been hit, script stopped\n")
sys.exit(0)
elif "Fail" in data['Message'] or "fail" in data['Message'] or data['JobState'] == "Failed":
logging.error("- FAIL: job ID %s failed, failed message is: %s" % (job_id, data['Message']))
sys.exit(0)
elif data['JobState'] == "Completed":
logging.info("\n--- PASS, Final Detailed Job Status Results ---\n")
for i in data.items():
if "odata" not in i[0] or "MessageArgs" not in i[0] or "TargetSettingsURI" not in i[0]:
print("%s: %s" % (i[0],i[1]))
break
else:
logging.info("- INFO, job status not completed, current status: \"%s\"" % data['Message'])
time.sleep(3)
def test_valid_controller_FQDD_string(x):
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, x),verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s' % (idrac_ip, x),verify=verify_cert,auth=(idrac_username, idrac_password))
if response.status_code != 200:
logging.error("\n- FAIL, either controller FQDD does not exist or typo in FQDD string name (FQDD controller string value is case sensitive)")
sys.exit(0)
def main():
global idrac_ip
global idrac_username
global idrac_password
global verify_cert
if args["script_examples"]:
script_examples()
if args["ip"] and args["ssl"] or args["u"] or args["p"] or args["x"]:
idrac_ip=args["ip"]
idrac_username=args["u"]
if args["p"]:
idrac_password=args["p"]
if not args["p"] and not args["x"] and args["u"]:
idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
if args["ssl"]:
if args["ssl"].lower() == "true":
verify_cert = True
elif args["ssl"].lower() == "false":
verify_cert = False
else:
verify_cert = False
else:
verify_cert = False
check_supported_idrac_version()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
sys.exit(0)
if args["get_controllers"]:
get_storage_controllers()
elif args["get_disks"]:
get_pdisks()
elif args["get_hotspare_drive"]:
get_pdisks_hot_spare_type()
elif args["get_virtualdisks"]:
get_virtual_disks()
elif args["get_virtualdisk_details"]:
get_virtual_disks_details()
elif args["assign_disk"] or args["assign_virtualdisk"] and args["hotspare_type"]:
assign_spare()
loop_job_status()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
sys.exit(0)
if __name__ == "__main__":
main() | PypiClean |
# /CC-dbgen-0.2.0.tar.gz/CC-dbgen-0.2.0/dbgen/support/datatypes/sqltypes.py
from abc import abstractmethod
from re import split
################################################################################
class SQLType(object):
"""
SQL datatypes
"""
@abstractmethod
def __str__(self)->str:
pass
def __repr__(self)->str:
return str(self)
    def __eq__(self, other: object) -> bool:
        return isinstance(other, SQLType) and self.__dict__ == other.__dict__
@staticmethod
def from_str(s:str)->'SQLType':
if 'VARCHAR' in s:
mem = split(r'\(|\)',s)[1]
return Varchar(int(mem))
elif "DECIMAL" in s:
prec,scale = split(r'\(|\)|,',s)[1:3]
return Decimal(int(prec),int(scale))
elif 'INT' in s:
if 'TINY' in s: kind = 'tiny'
elif 'BIG' in s: kind = 'big'
else: kind = 'medium'
signed = 'UNSIGNED' not in s
return Int(kind,signed)
elif 'TEXT' in s:
if 'TINY' in s: kind = 'tiny'
elif 'MED' in s: kind = 'medium'
elif 'LONG' in s: kind = 'long'
else : kind = ''
return Text(kind)
else:
raise NotImplementedError("New SQLtype to parse?")
class Varchar(SQLType):
"""
Varchar
"""
def __init__(self,mem:int=255)->None:
self.mem=mem
def __str__(self)->str:
return "VARCHAR(%d)"%self.mem
class Decimal(SQLType):
def __init__(self,prec:int=10,scale:int=3)->None:
self.prec = prec
self.scale = scale
def __str__(self)->str:
return "DECIMAL(%d,%d)"%(self.prec,self.scale)
class Int(SQLType):
def __init__(self,kind:str='medium',signed:bool=True)->None:
self.kind = kind
self.signed = signed
def __str__(self)->str:
if self.kind == 'tiny': core= "TINYINT"
elif self.kind == 'medium': core= "INTEGER"
elif self.kind == 'big' : core= "BIGINT"
else:
raise ValueError('unknown Int kind: '+self.kind)
return core + "" if self.signed else " UNSIGNED"
class Text(SQLType):
def __init__(self,kind:str='')->None:
self.kind = kind
def __str__(self)->str:
if self.kind == 'tiny': return "TINYTEXT"
elif self.kind == '': return "TEXT"
elif self.kind == 'medium': return "MEDIUMTEXT"
elif self.kind == 'long' : return "LONGTEXT"
else:
raise ValueError('unknown TEXT kind: '+self.kind)
class Date(SQLType):
def __init__(self)->None:
pass
def __str__(self)->str:
return "DATE" | PypiClean |
# /EARL-pytorch-0.5.1.tar.gz/EARL-pytorch-0.5.1/rlgym/utils/math.py
import numpy as np
def get_dist(x, y):
return np.subtract(x, y)
def vector_projection(vec, dest_vec, mag_squared=None):
if mag_squared is None:
norm = vecmag(dest_vec)
if norm == 0:
return dest_vec
mag_squared = norm * norm
if mag_squared == 0:
return dest_vec
dot = np.dot(vec, dest_vec)
projection = np.multiply(np.divide(dot, mag_squared), dest_vec)
return projection
def scalar_projection(vec, dest_vec):
norm = vecmag(dest_vec)
if norm == 0:
return 0
dot = np.dot(vec, dest_vec) / norm
return dot
def squared_vecmag(vec):
x = np.linalg.norm(vec)
return x * x
def vecmag(vec):
norm = np.linalg.norm(vec)
return norm
def unitvec(vec):
return np.divide(vec, vecmag(vec))
def cosine_similarity(a, b):
return np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b))
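# Note: quaternions here are ordered (w, x, y, z); the returned Euler angles
# are (-pitch, yaw, -roll) in radians, matching the unpacking and return below.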
def quat_to_euler(quat):
w, x, y, z = quat
sinr_cosp = 2 * (w * x + y * z)
cosr_cosp = 1 - 2 * (x * x + y * y)
sinp = 2 * (w * y - z * x)
siny_cosp = 2 * (w * z + x * y)
cosy_cosp = 1 - 2 * (y * y + z * z)
roll = np.arctan2(sinr_cosp, cosr_cosp)
if abs(sinp) > 1:
pitch = np.pi / 2
else:
pitch = np.arcsin(sinp)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return np.array([-pitch, yaw, -roll])
# From RLUtilities
def quat_to_rot_mtx(quat: np.ndarray) -> np.ndarray:
w = -quat[0]
x = -quat[1]
y = -quat[2]
z = -quat[3]
theta = np.zeros((3, 3))
norm = np.dot(quat, quat)
if norm != 0:
s = 1.0 / norm
# front direction
theta[0, 0] = 1.0 - 2.0 * s * (y * y + z * z)
theta[1, 0] = 2.0 * s * (x * y + z * w)
theta[2, 0] = 2.0 * s * (x * z - y * w)
# left direction
theta[0, 1] = 2.0 * s * (x * y - z * w)
theta[1, 1] = 1.0 - 2.0 * s * (x * x + z * z)
theta[2, 1] = 2.0 * s * (y * z + x * w)
# up direction
theta[0, 2] = 2.0 * s * (x * z + y * w)
theta[1, 2] = 2.0 * s * (y * z - x * w)
theta[2, 2] = 1.0 - 2.0 * s * (x * x + y * y)
return theta
def rotation_to_quaternion(m: np.ndarray) -> np.ndarray:
trace = np.trace(m)
q = np.zeros(4)
if trace > 0:
s = (trace + 1) ** 0.5
q[0] = s * 0.5
s = 0.5 / s
q[1] = (m[2, 1] - m[1, 2]) * s
q[2] = (m[0, 2] - m[2, 0]) * s
q[3] = (m[1, 0] - m[0, 1]) * s
else:
if m[0, 0] >= m[1, 1] and m[0, 0] >= m[2, 2]:
s = (1 + m[0, 0] - m[1, 1] - m[2, 2]) ** 0.5
inv_s = 0.5 / s
q[1] = 0.5 * s
q[2] = (m[1, 0] + m[0, 1]) * inv_s
q[3] = (m[2, 0] + m[0, 2]) * inv_s
q[0] = (m[2, 1] - m[1, 2]) * inv_s
elif m[1, 1] > m[2, 2]:
s = (1 + m[1, 1] - m[0, 0] - m[2, 2]) ** 0.5
inv_s = 0.5 / s
q[1] = (m[0, 1] + m[1, 0]) * inv_s
q[2] = 0.5 * s
q[3] = (m[1, 2] + m[2, 1]) * inv_s
q[0] = (m[0, 2] - m[2, 0]) * inv_s
else:
s = (1 + m[2, 2] - m[0, 0] - m[1, 1]) ** 0.5
inv_s = 0.5 / s
q[1] = (m[0, 2] + m[2, 0]) * inv_s
q[2] = (m[1, 2] + m[2, 1]) * inv_s
q[3] = 0.5 * s
q[0] = (m[1, 0] - m[0, 1]) * inv_s
# q[[0, 1, 2, 3]] = q[[3, 0, 1, 2]]
return -q
def euler_to_rotation(pyr):
cp, cy, cr = np.cos(pyr)
sp, sy, sr = np.sin(pyr)
theta = np.zeros((3, 3))
# front
theta[0, 0] = cp * cy
theta[1, 0] = cp * sy
theta[2, 0] = sp
# left
theta[0, 1] = cy * sp * sr - cr * sy
theta[1, 1] = sy * sp * sr + cr * cy
theta[2, 1] = -cp * sr
# up
theta[0, 2] = -cr * cy * sp - sr * sy
theta[1, 2] = -cr * sy * sp + sr * cy
theta[2, 2] = cp * cr
return theta
def rand_uvec3():
vec = np.random.rand(3) - 0.5
return vec / np.linalg.norm(vec)
def rand_vec3(max_norm):
return rand_uvec3() * (np.random.rand() * max_norm) | PypiClean |
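# --- Illustrative sketch (not part of the original module) ---
# Quick hand-verifiable checks for a couple of the helpers above; the vectors
# below are arbitrary examples.
if __name__ == '__main__':
    x_axis = np.array([1.0, 0.0, 0.0])
    y_axis = np.array([0.0, 1.0, 0.0])
    # Orthogonal unit vectors have zero cosine similarity.
    print(cosine_similarity(x_axis, y_axis))                      # ~0.0
    # Projecting (3, 4, 0) onto the x axis keeps only the x component.
    print(vector_projection(np.array([3.0, 4.0, 0.0]), x_axis))   # [3. 0. 0.]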
<!-- /OctoPrint-Slack-0.2.2.tar.gz/OctoPrint-Slack-0.2.2/README.md -->
OctoPrint Slack Integration
===========================
Send messages to your group's Slack chat when printing events happen!
You need to set up an [Incoming Webhook](https://my.slack.com/services/new/incoming-webhook) integration on the Slack side to use this.
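If you want to sanity-check the webhook before configuring the plugin, a rough
sketch like the one below (the webhook URL is a placeholder for the one Slack
generates for you) should post a test message into your channel:
```python
import requests

webhook_url = "https://hooks.slack.com/services/XXX/YYY/ZZZ"  # placeholder
requests.post(webhook_url, json={"text": "Test message before configuring OctoPrint-Slack"})
```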
Features
--------
* Select which events you want to trigger a chat notification for
* Customizable messages for each event
* Customize bot icon and username in Slack chat
* Sends the elapsed print time after a print finishes
Screenshots
-----------



Installation
------------
Follow the instructions provided by [OctoPrint](http://plugins.octoprint.org/help/installation/).
| PypiClean |
# /BinTools-0.2.0.zip/BinTools-0.2.0/bintools/dwarf/expressions.py
from bintools.elf.exception import *
from bintools.dwarf.enums import DW_OP
class Instruction(object):
def __init__(self, addr, opcode, operand_1=None, operand_2=None):
self.addr = addr
self.opcode = opcode
self.operand_1 = operand_1
self.operand_2 = operand_2
def get(self):
return (self.addr, self.opcode, self.operand_1, self.operand_2)
def __str__(self):
s = 'DW_OP_' + DW_OP[self.opcode]
if self.operand_1 is not None:
s += '('
if DW_OP[self.opcode] == 'addr':
s += str(hex(self.operand_1))
else:
s += str(self.operand_1)
if self.operand_2 is not None:
s += ','
s += str(self.operand_2)
s += ')'
return s #''.join(s)
DW_OP_OPERANDS = {
# Literal Encodings
DW_OP.addr : ('addr' , None),
DW_OP.const1u: ('data1', None),
DW_OP.const1s: ('sdata1', None),
DW_OP.const2u: ('data2', None),
DW_OP.const2s: ('sdata2', None),
DW_OP.const4u: ('data4', None),
DW_OP.const4s: ('sdata4', None),
DW_OP.const8u: ('data8', None),
DW_OP.const8s: ('sdata8', None),
DW_OP.constu : ('read_udata', None),
DW_OP.consts : ('read_sdata', None),
# Register Based Addressing
DW_OP.fbreg : ('sdata', None),
DW_OP.breg0 : ('sdata', None), DW_OP.breg1 : ('sdata', None),
DW_OP.breg2 : ('sdata', None), DW_OP.breg3 : ('sdata', None),
DW_OP.breg4 : ('sdata', None), DW_OP.breg5 : ('sdata', None),
DW_OP.breg6 : ('sdata', None), DW_OP.breg7 : ('sdata', None),
DW_OP.breg8 : ('sdata', None), DW_OP.breg9 : ('sdata', None),
DW_OP.breg10: ('sdata', None), DW_OP.breg11: ('sdata', None),
DW_OP.breg12: ('sdata', None), DW_OP.breg13: ('sdata', None),
DW_OP.breg14: ('sdata', None), DW_OP.breg15: ('sdata', None),
DW_OP.breg16: ('sdata', None), DW_OP.breg17: ('sdata', None),
DW_OP.breg18: ('sdata', None), DW_OP.breg19: ('sdata', None),
DW_OP.breg20: ('sdata', None), DW_OP.breg21: ('sdata', None),
DW_OP.breg22: ('sdata', None), DW_OP.breg23: ('sdata', None),
DW_OP.breg24: ('sdata', None), DW_OP.breg25: ('sdata', None),
DW_OP.breg26: ('sdata', None), DW_OP.breg27: ('sdata', None),
DW_OP.breg28: ('sdata', None), DW_OP.breg29: ('sdata', None),
DW_OP.breg30: ('sdata', None), DW_OP.breg31: ('sdata', None),
DW_OP.bregx : ('sdata', 'sdata'),
# Stack Operations
DW_OP.pick : ('data1', None),
DW_OP.deref_size : ('data1', None),
DW_OP.xderef_size: ('data1', None),
# Arithmetic and Logical Operations
DW_OP.plus_uconst: ('udata', None),
# Control Flow Operations
DW_OP.skip: ('sdata2', None),
DW_OP.bra : ('sdata2', None),
# Special Operations
DW_OP.piece: ('udata', None),
# DWARF3/4
DW_OP.call2: ('data2', None),
DW_OP.call4: ('data4', None),
DW_OP.call_ref: ('data4', None),
DW_OP.bit_piece: ('udata', 'udata'),
DW_OP.implicit_value: ('block', None),
}
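# Each entry above maps an opcode to (first operand reader, second operand
# reader or None); Expression passes these names to dwarf.read_type() when
# decoding the instruction stream.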
class Expression(object):
def __init__(self, dwarf, length):
self.instructions = []
self.addr_index_dict = {}
start = dwarf.io.tell()
i = 0
while True:
addr = dwarf.io.tell() - start
if addr >= length:
break
opcode = dwarf.u08()
if opcode not in DW_OP:
raise ParseError("Unknown DW_OP code: %d" % opcode)
operand_1 = operand_2 = None
if opcode in DW_OP_OPERANDS:
type_1, type_2 = DW_OP_OPERANDS[opcode]
operand_1 = dwarf.read_type(type_1)
if type_2 is not None:
operand_2 = dwarf.read_type(type_2)
self.instructions.append(Instruction(addr, opcode, operand_1, operand_2))
self.addr_index_dict[addr] = i
i += 1
@staticmethod
    def __get_values(addr_stack, n=2):
values = []
for _ in range(n):
values.append(addr_stack.pop())
return values
def evaluate(self, base_address=0, machine=None):
addr_stack = [base_address]
i = 0
while True:
if i >= len(self.instructions): break
op_addr, opcode, operand_1, operand_2 = self.instructions[i].get()
# Literal Encodings
            if opcode >= DW_OP.lit0 and opcode <= DW_OP.lit31:
addr_stack.append(opcode - DW_OP.lit0)
if opcode in [DW_OP.addr, DW_OP.constu, DW_OP.consts,
DW_OP.const1u, DW_OP.const1s, DW_OP.const2u, DW_OP.const2s,
DW_OP.const4u, DW_OP.const4s, DW_OP.const8u, DW_OP.const8s]:
addr_stack.append(operand_1)
# Register Based Addressing
            elif opcode == DW_OP.fbreg:
                addr_stack.append(machine.read_fbreg() + operand_1)
            elif opcode >= DW_OP.breg0 and opcode <= DW_OP.breg31:
                reg_index = opcode - DW_OP.breg0
                addr_stack.append(machine.read_reg(reg_index) + operand_1)
            elif opcode == DW_OP.bregx:
                addr_stack.append(machine.read_reg(operand_1) + operand_2)
# Stack Operations
elif opcode == DW_OP.dup:
addr_stack.append(addr_stack[-1])
elif opcode == DW_OP.drop:
addr_stack.pop()
elif opcode == DW_OP.pick:
index = len(addr_stack) - operand_1 - 1
addr_stack.append(addr_stack[index])
elif opcode == DW_OP.over:
addr_stack.append(addr_stack[-2])
elif opcode == DW_OP.swap:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack += [former_top, former_second]
elif opcode == DW_OP.rot:
top, second, third = Expression.__get_values(addr_stack, 3)
addr_stack += [top, third, second]
            elif opcode == DW_OP.deref:
                addr = addr_stack.pop()
                addr_stack.append(machine.read_addr(addr))
            elif opcode == DW_OP.deref_size:
                addr = addr_stack.pop()
                addr_stack.append(machine.read_addr(addr))
            elif opcode == DW_OP.xderef:
                addr = addr_stack.pop()
                addr_space_id = addr_stack.pop()
                addr_stack.append(machine.read_addr(addr, addr_space_id))
            elif opcode == DW_OP.xderef_size:
                addr = addr_stack.pop()
                addr_space_id = addr_stack.pop()
                addr_stack.append(machine.read_addr(addr, addr_space_id))
# Arithmetic and Logical Operations
elif opcode == DW_OP.abs:
top = addr_stack.pop()
addr_stack.append(abs(top))
elif opcode == DW_OP.and_:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_top & former_second)
elif opcode == DW_OP.div:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second // former_top)
elif opcode == DW_OP.minus:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second - former_top)
elif opcode == DW_OP.mod:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second % former_top)
elif opcode == DW_OP.mul:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second * former_top)
elif opcode == DW_OP.neg:
top = addr_stack.pop()
addr_stack.append(-top)
elif opcode == DW_OP.not_:
top = addr_stack.pop()
addr_stack.append(~top)
elif opcode == DW_OP.plus:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second + former_top)
elif opcode == DW_OP.plus_uconst:
top = addr_stack.pop()
addr_stack.append(top + operand_1)
elif opcode == DW_OP.shl:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second << former_top)
elif opcode == DW_OP.shr:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second >> former_top)
elif opcode == DW_OP.shra:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second >> former_top)
elif opcode == DW_OP.xor:
former_top, former_second = Expression.__get_values(addr_stack)
addr_stack.append(former_second ^ former_top)
# Control Flow Operations
elif opcode >= DW_OP.eq and opcode <= DW_OP.ne:
former_top, former_second = Expression.__get_values(addr_stack)
if opcode == DW_OP.eq:
control = former_top == former_second
elif opcode == DW_OP.ge:
control = former_top >= former_second
elif opcode == DW_OP.gt:
control = former_top > former_second
elif opcode == DW_OP.le:
control = former_top <= former_second
elif opcode == DW_OP.lt:
control = former_top < former_second
elif opcode == DW_OP.ne:
control = former_top != former_second
addr_stack.append(1 if control else 0)
elif opcode == DW_OP.skip:
i = self.addr_index_dict[op_addr + operand_1]
continue
elif opcode == DW_OP.bra:
top = addr_stack.pop()
if top != 0:
i = self.addr_index_dict[op_addr + operand_1]
continue
# Special Operations
elif opcode == DW_OP.piece:
self.size = operand_1
i += 1
return addr_stack.pop()
def __str__(self):
return ' '.join(map(str, self.instructions))
if __name__ == '__main__':
    from bintools.dwarf.stream import DwarfList
location_data = [0x23, 0x08]
test_stream = DwarfList(location_data)
e = Expression(test_stream, len(location_data))
loc = e.evaluate()
assert loc == 8, 'Error evaluating: %s' % location_data
print('OK') | PypiClean |
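    # Additional illustrative check (not in the original module): literal
    # opcodes push (opcode - DW_OP.lit0); this assumes the litN opcode values
    # are consecutive, as the evaluator above already does.
    lit_data = [DW_OP.lit0 + 5, DW_OP.lit0 + 3, DW_OP.plus]
    lit_stream = DwarfList(lit_data)
    lit_expr = Expression(lit_stream, len(lit_data))
    assert lit_expr.evaluate() == 8, 'Error evaluating literal expression'
    print('OK (literals)')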
# /EVE-SRP-0.12.11.tar.gz/EVE-SRP-0.12.11/src/evesrp/models.py
from __future__ import absolute_import
import datetime as dt
from decimal import Decimal
import six
from six.moves import filter, map, range
from sqlalchemy import event
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.event import listens_for
from sqlalchemy.schema import DDL, DropIndex
from flask import Markup, current_app, url_for
from flask_babel import gettext, lazy_gettext
from flask_login import current_user
from . import db
from .util import DeclEnum, classproperty, AutoID, Timestamped, AutoName,\
unistr, ensure_unicode, PrettyDecimal, PrettyNumeric, DateTime
from .auth import PermissionType
if six.PY3:
unicode = str
class ActionType(DeclEnum):
# The actual stored values are single character to make it easier on
# engines that don't support native enum types.
# TRANS: Name of the status a request is in when it has been submitted and
# TRANS: is ready to be evaluated.
evaluating = u'evaluating', lazy_gettext(u'Evaluating')
"""Status for a request being evaluated."""
# TRANS: Name of the status where a request has had a payout amount set,
# TRANS: and is ready to be paid out. In other words, approved for payout.
approved = u'approved', lazy_gettext(u'Approved')
"""Status for a request that has been evaluated and is awaitng payment."""
# TRANS: Name of the status a request is in if the ISK has been sent to the
# TRANS: requesting person, and no further action is needed.
paid = u'paid', lazy_gettext(u'Paid')
"""Status for a request that has been paid. This is a terminatint state."""
# TRANS: Name of the status a request has where a reviewer has rejected the
# TRANS: request for SRP.
rejected = u'rejected', lazy_gettext(u'Rejected')
"""Status for a requests that has been rejected. This is a terminating
state.
"""
# TRANS: When a request needs more information to be approved or rejected,
# TRANS: it is in this status.
incomplete = u'incomplete', lazy_gettext(u'Incomplete')
"""Status for a request that is missing details and needs further
action.
"""
# TRANS: A comment made on a request.
comment = u'comment', lazy_gettext(u'Comment')
"""A special type of :py:class:`Action` representing a comment made on the
request.
"""
@classproperty
def finalized(cls):
return frozenset((cls.paid, cls.rejected))
@classproperty
def pending(cls):
return frozenset((cls.evaluating, cls.approved, cls.incomplete))
@classproperty
def statuses(cls):
return frozenset((cls.evaluating, cls.approved, cls.paid, cls.rejected,
cls.incomplete))
class ActionError(ValueError):
"""Error raised for invalid state changes for a :py:class:`Request`."""
pass
class Action(db.Model, AutoID, Timestamped, AutoName):
"""Actions change the state of a Request.
:py:class:`Request`\s enforce permissions when actions are added to them.
If the user adding the action does not have the appropriate
:py:class:`~.Permission`\s in the request's :py:class:`Division`, an
:py:exc:`ActionError` will be raised.
With the exception of the :py:attr:`comment <ActionType.comment>` action
(which just adds text to a request), actions change the
:py:attr:`~Request.status` of a Request.
"""
#: The action be taken. See :py:class:`ActionType` for possible values.
# See set_request_type below for the effect setting this attribute has on
# the parent Request.
type_ = db.Column(ActionType.db_type(), nullable=False)
#: The ID of the :py:class:`Request` this action applies to.
request_id = db.Column(db.Integer, db.ForeignKey('request.id'))
#: The :py:class:`Request` this action applies to.
request = db.relationship('Request', back_populates='actions',
cascade='save-update,merge,refresh-expire,expunge')
#: The ID of the :py:class:`~.User` who made this action.
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
#: The :py:class:`~.User` who made this action.
user = db.relationship('User', back_populates='actions',
cascade='save-update,merge,refresh-expire,expunge')
#: Any additional notes for this action.
note = db.Column(db.Text(convert_unicode=True))
def __init__(self, request, user, note=None, type_=None):
if type_ is not None:
self.type_ = type_
self.user = user
self.note = ensure_unicode(note)
# timestamp has to be an actual value (besides None) before the request
        # is set so the request's validation doesn't fail.
self.timestamp = dt.datetime.utcnow()
self.request = request
def __repr__(self):
return "{x.__class__.__name__}({x.request}, {x.user}, {x.type_})".\
format(x=self)
def _json(self, extended=False):
try:
parent = super(Action, self)._json(extended)
except AttributeError:
parent = {}
parent[u'type'] = self.type_
if extended:
parent[u'note'] = self.note or u''
parent[u'timestamp'] = self.timestamp
parent[u'user'] = self.user
return parent
class ModifierError(ValueError):
"""Error raised when a modification is attempted to a :py:class:`Request`
when it's in an invalid state.
"""
pass
class Modifier(db.Model, AutoID, Timestamped, AutoName):
"""Modifiers apply bonuses or penalties to Requests.
This is an abstract base class for the pair of concrete implementations.
Modifiers can be voided at a later date. The user who voided a modifier and
when it was voided are recorded.
:py:class:`Request`\s enforce permissions when modifiers are added. If the
user adding a modifier does not have the appropriate
:py:class:`~.Permission`\s in the request's :py:class:`~.Division`, a
:py:exc:`ModifierError` will be raised.
"""
#: Discriminator column for SQLAlchemy
_type = db.Column(db.String(20, convert_unicode=True), nullable=False)
#: The ID of the :py:class:`Request` this modifier applies to.
request_id = db.Column(db.Integer, db.ForeignKey('request.id'))
#: The :py:class:`Request` this modifier applies to.
request = db.relationship('Request', back_populates='modifiers',
cascade='save-update,merge,refresh-expire,expunge')
    #: The ID of the :py:class:`~.User` who added this modifier.
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
#: The :py:class:`~.User` who added this modifier.
user = db.relationship('User', foreign_keys=[user_id],
cascade='save-update,merge,refresh-expire,expunge')
#: Any notes explaining this modification.
note = db.Column(db.Text(convert_unicode=True))
#: The ID of the :py:class:`~.User` who voided this modifier (if voided).
voided_user_id = db.Column(db.Integer, db.ForeignKey('user.id'),
nullable=True)
#: The :py:class:`~.User` who voided this modifier if it has been voided.
voided_user = db.relationship('User', foreign_keys=[voided_user_id],
cascade='save-update,merge,refresh-expire,expunge')
#: If this modifier has been voided, this will be the timestamp of when it
#: was voided.
voided_timestamp = db.Column(DateTime)
@hybrid_property
def voided(self):
return self.voided_user is not None and \
self.voided_timestamp is not None
@classmethod
def _voided_select(cls):
"""Create a subquery with two columns, ``modifier_id`` and ``voided``.
Used for the expressions of :py:attr:`voided` and
:py:attr:`Request.payout`.
"""
user = db.select([cls.id.label('modifier_id'),
cls.voided_user_id.label('user_id')]).alias('user_sub')
timestamp = db.select([cls.id.label('modifier_id'),
cls.voided_timestamp.label('timestamp')]).alias('timestamp_sub')
columns = [
db.and_(
user.c.user_id != None,
timestamp.c.timestamp != None).label('voided'),
user.c.modifier_id.label('modifier_id'),
]
return db.select(columns).where(
user.c.modifier_id == timestamp.c.modifier_id)\
.alias('voided_sub')
@voided.expression
def voided(cls):
return cls._voided_select().c.voided
@declared_attr
def __mapper_args__(cls):
"""SQLAlchemy late-binding attribute to set mapper arguments.
        Saves subclasses from having to specify polymorphic identities.
"""
cls_name = unicode(cls.__name__)
args = {'polymorphic_identity': cls_name}
if cls_name == u'Modifier':
args['polymorphic_on'] = cls._type
return args
def __init__(self, request, user, note, value):
self.user = user
self.note = ensure_unicode(note)
self.value = value
self.request = request
def __repr__(self):
return ("{x.__class__.__name__}({x.request}, {x.user},"
"{x}, {x.voided})".format(x=self, value=self))
def void(self, user):
"""Mark this modifier as void.
:param user: The user voiding this modifier
:type user: :py:class:`~.User`
"""
if self.request.status != ActionType.evaluating:
# TRANS: Error message shown when trying to void (cancel) a
# modifier but the request is not in the evaluating state, so the
# attempt fails.
raise ModifierError(gettext(u"Modifiers can only be voided when "
u"the request is in the evaluating "
u"state."))
if not user.has_permission(PermissionType.review,
self.request.division):
# TRANS: Error message shown when you attempt to void a modifier
# but are prevented from doing so because you do not hold the
# reviewer permission.
raise ModifierError("You must be a reviewer to be able to void "
"modifiers.")
self.voided_user = user
self.voided_timestamp = dt.datetime.utcnow()
@db.validates('request')
def _check_request_status(self, attr, request):
if current_app.config['SRP_SKIP_VALIDATION']:
return request
if request.status != ActionType.evaluating:
raise ModifierError(gettext(u"Modifiers can only be added when the"
u" request is in an evaluating "
u"state."))
if not self.user.has_permission(PermissionType.review,
request.division):
raise ModifierError(gettext(u"Only reviewers can add modifiers."))
return request
def _json(self, extended=False):
try:
parent = super(Modifier, self)._json(extended)
except AttributeError:
parent = {}
parent[u'value'] = self.value
if extended:
parent[u'note'] = self.note or u''
parent[u'timestamp'] = self.timestamp
parent[u'user'] = self.user
if self.voided:
parent[u'void'] = {
u'user': self.voided_user,
u'timestamp': self.voided_timestamp,
}
else:
parent[u'void'] = False
else:
parent[u'void'] = self.voided
return parent
@unistr
class AbsoluteModifier(Modifier):
"""Subclass of :py:class:`Modifier` for representing absolute
modifications.
Absolute modifications are those that are not dependent on the value of
:py:attr:`Request.base_payout`.
"""
id = db.Column(db.Integer, db.ForeignKey('modifier.id'), primary_key=True)
#: How much ISK to add or remove from the payout
value = db.Column(PrettyNumeric(precision=15, scale=2), nullable=False,
default=Decimal(0))
def _json(self, extended=False):
try:
parent = super(AbsoluteModifier, self)._json(extended)
except AttributeError:
parent = {}
parent[u'type'] = 'absolute'
return parent
@unistr
class RelativeModifier(Modifier):
"""Subclass of :py:class:`Modifier` for representing relative modifiers.
    Relative modifiers depend on the value of :py:attr:`Request.base_payout`
to calculate their effect.
"""
id = db.Column(db.Integer, db.ForeignKey('modifier.id'), primary_key=True)
#: What percentage of the payout to add or remove
value = db.Column(db.Numeric(precision=8, scale=5), nullable=False,
default=Decimal(0))
def _json(self, extended=False):
try:
parent = super(RelativeModifier, self)._json(extended)
except AttributeError:
parent = {}
parent[u'type'] = 'relative'
return parent
class Request(db.Model, AutoID, Timestamped, AutoName):
"""Requests represent SRP requests."""
#: The ID of the :py:class:`~.User` who submitted this request.
submitter_id = db.Column(db.Integer, db.ForeignKey('user.id'))
#: The :py:class:`~.User` who submitted this request.
submitter = db.relationship('User', back_populates='requests',
cascade='save-update,merge,refresh-expire,expunge')
    #: The ID of the :py:class:`~.Division` this request was submitted to.
division_id = db.Column(db.Integer, db.ForeignKey('division.id'),
nullable=False)
#: The :py:class:`~.Division` this request was submitted to.
division = db.relationship('Division', back_populates='requests',
cascade='save-update,merge,refresh-expire,expunge')
#: A list of :py:class:`Action`\s that have been applied to this request,
    #: sorted most recent first.
actions = db.relationship('Action', back_populates='request',
cascade='all,delete-orphan',
order_by='desc(Action.timestamp)')
#: A list of all :py:class:`Modifier`\s that have been applied to this
    #: request, regardless of whether they have been voided or not. They're
    #: sorted most recent first.
modifiers = db.relationship('Modifier', back_populates='request',
cascade='all,delete-orphan',
lazy='dynamic', order_by='desc(Modifier.timestamp)')
#: The URL of the source killmail.
killmail_url = db.Column(db.String(512, convert_unicode=True),
nullable=False)
#: The ID of the :py:class:`~.Pilot` for the killmail.
pilot_id = db.Column(db.Integer, db.ForeignKey('pilot.id'), nullable=False)
#: The :py:class:`~.Pilot` who was the victim in the killmail.
pilot = db.relationship('Pilot', back_populates='requests',
cascade='save-update,merge,refresh-expire,expunge')
#: The corporation of the :py:attr:`pilot` at the time of the killmail.
corporation = db.Column(db.String(150, convert_unicode=True),
nullable=False, index=True)
#: The alliance of the :py:attr:`pilot` at the time of the killmail.
alliance = db.Column(db.String(150, convert_unicode=True), nullable=True,
index=True)
#: The type of ship that was destroyed.
ship_type = db.Column(db.String(75, convert_unicode=True), nullable=False,
index=True)
# TODO: include timezones
#: The date and time of when the ship was destroyed.
kill_timestamp = db.Column(DateTime, nullable=False, index=True)
base_payout = db.Column(PrettyNumeric(precision=15, scale=2),
default=Decimal(0))
"""The base payout for this request.
This value is clamped to a lower limit of 0. It can only be changed when
this request is in an :py:attr:`~ActionType.evaluating` state, or else a
:py:exc:`ModifierError` will be raised.
"""
    #: The payout for this request, taking into account all active modifiers.
payout = db.Column(PrettyNumeric(precision=15, scale=2),
default=Decimal(0), index=True, nullable=False)
#: Supporting information for the request.
details = db.deferred(db.Column(db.Text(convert_unicode=True)))
#: The current status of this request
status = db.Column(ActionType.db_type(), nullable=False,
default=ActionType.evaluating)
"""This attribute is automatically kept in sync as :py:class:`Action`\s are
added to the request. It should not be set otherwise.
At the time an :py:class:`Action` is added to this request, the type of
action is checked and the state diagram below is enforced. If the action is
invalid, an :py:exc:`ActionError` is raised.
.. digraph:: request_workflow
rankdir="LR";
sub [label="submitted", shape=plaintext];
node [style="dashed, filled"];
eval [label="evaluating", fillcolor="#fcf8e3"];
rej [label="rejected", style="solid, filled", fillcolor="#f2dede"];
app [label="approved", fillcolor="#d9edf7"];
inc [label="incomplete", fillcolor="#f2dede"];
paid [label="paid", style="solid, filled", fillcolor="#dff0d8"];
sub -> eval;
eval -> rej [label="R"];
eval -> app [label="R"];
eval -> inc [label="R"];
rej -> eval [label="R"];
inc -> eval [label="R, S"];
inc -> rej [label="R"];
app -> paid [label="P"];
app -> eval [label="R"];
paid -> eval [label="P"];
paid -> app [label="P"];
R means a reviewer can make that change, S means the submitter can make
that change, and P means payers can make that change. Solid borders are
terminal states.
"""
    #: The solar system this loss occurred in.
system = db.Column(db.String(25, convert_unicode=True), nullable=False,
index=True)
    #: The constellation this loss occurred in.
constellation = db.Column(db.String(25, convert_unicode=True),
nullable=False, index=True)
    #: The region this loss occurred in.
region = db.Column(db.String(25, convert_unicode=True), nullable=False,
index=True)
@hybrid_property
def finalized(self):
return self.status in ActionType.finalized
@finalized.expression
def finalized(cls):
return db.or_(cls.status == ActionType.paid,
cls.status == ActionType.rejected)
def __init__(self, submitter, details, division, killmail, **kwargs):
"""Create a :py:class:`Request`.
:param submitter: The user submitting this request
:type submitter: :py:class:`~.User`
:param str details: Supporting details for this request
:param division: The division this request is being submitted to
:type division: :py:class:`~.Division`
:param killmail: The killmail this request pertains to
:type killmail: :py:class:`~.Killmail`
"""
with db.session.no_autoflush:
self.division = division
self.details = details
self.submitter = submitter
# Pull basically everything else from the killmail object
# The base Killmail object has an iterator defined that returns tuples
# of Request attributes and values for those attributes
for attr, value in killmail:
setattr(self, attr, value)
# Set default values before a flush
if self.base_payout is None and 'base_payout' not in kwargs:
self.base_payout = Decimal(0)
super(Request, self).__init__(**kwargs)
@db.validates('base_payout')
def _validate_payout(self, attr, value):
"""Ensures that base_payout is positive. The value is clamped to 0."""
if current_app.config['SRP_SKIP_VALIDATION']:
return Decimal(value)
# Allow self.status == None, as the base payout may be set in the
# initializing state before the status has been set.
if self.status == ActionType.evaluating or self.status is None:
if value is None or value < 0:
return Decimal('0')
else:
return Decimal(value)
else:
raise ModifierError(gettext(u"The request must be in the "
u"evaluating state to change the base "
u"payout."))
state_rules = {
ActionType.evaluating: {
ActionType.incomplete: (PermissionType.review,
PermissionType.admin),
ActionType.rejected: (PermissionType.review,
PermissionType.admin),
ActionType.approved: (PermissionType.review,
PermissionType.admin),
},
ActionType.incomplete: {
ActionType.rejected: (PermissionType.review,
PermissionType.admin),
# Special case: the submitter can change it to evaluating by
# changing the division or updating the details.
ActionType.evaluating: (PermissionType.review,
PermissionType.admin),
},
ActionType.rejected: {
ActionType.evaluating: (PermissionType.review,
PermissionType.admin),
},
ActionType.approved: {
# Special case: the submitter can change it to evaluating by
# changing the division.
ActionType.evaluating: (PermissionType.review,
PermissionType.admin),
ActionType.paid: (PermissionType.pay, PermissionType.admin),
},
ActionType.paid: {
ActionType.approved: (PermissionType.pay, PermissionType.admin),
ActionType.evaluating: (PermissionType.pay, PermissionType.admin),
},
}
def valid_actions(self, user):
"""Get valid actions (besides comment) the given user can perform."""
possible_actions = self.state_rules[self.status]
def action_filter(action):
return user.has_permission(possible_actions[action],
self.division)
return filter(action_filter, possible_actions)
@db.validates('status')
def _validate_status(self, attr, new_status):
"""Enforces that status changes follow the status state machine.
When an invalid change is attempted, :py:class:`ActionError` is
raised.
"""
if current_app.config['SRP_SKIP_VALIDATION']:
return new_status
if new_status == ActionType.comment:
raise ValueError(gettext(
u"Comment is not a valid status"))
# Initial status
if self.status is None:
return new_status
rules = self.state_rules[self.status]
if new_status not in rules:
error_text = gettext(u"%(new_status)s is not a valid status to "
u"change to from %(old_status)s.",
new_status=new_status,
old_status=self.status)
raise ActionError(error_text)
return new_status
@db.validates('actions')
def _verify_action_permissions(self, attr, action):
"""Verifies that permissions for Actions being added to a Request."""
if current_app.config['SRP_SKIP_VALIDATION']:
return action
if action.type_ is None:
# Action.type_ are not nullable, so rely on the fact that it will
# be set later to let it slide now.
return action
elif action.type_ != ActionType.comment:
# Peek behind the curtain to see the history of the status
# attribute.
status_history = db.inspect(self).attrs.status.history
if status_history.has_changes():
new_status = status_history.added[0]
old_status = status_history.deleted[0]
else:
new_status = action.type_
old_status = self.status
rules = self.state_rules[old_status]
permissions = rules[new_status]
# Handle the special cases called out in state_rules
if action.user == self.submitter and \
new_status == ActionType.evaluating and \
old_status in ActionType.pending:
# Equivalent to self.status in (approved, incomplete) as
# going from evaluating to evaluating is invalid (as checked by
# the status validator).
return action
if not action.user.has_permission(permissions, self.division):
raise ActionError(gettext(u"Insufficient permissions to "
u"perform that action."))
elif action.type_ == ActionType.comment:
if action.user != self.submitter \
and not action.user.has_permission(
(PermissionType.review, PermissionType.pay,
PermissionType.admin),
self.division):
raise ActionError(gettext(u"You must either own or have "
u"special privileges to comment on "
u"this request."))
return action
def __repr__(self):
return "{x.__class__.__name__}({x.submitter}, {x.division}, {x.id})".\
format(x=self)
@property
def transformed(self):
"""Get a special HTML representation of an attribute.
Divisions can have a transformer defined on various attributes that
output a URL associated with that attribute. This property provides
easy access to the output of any transformed attributes on this
request.
"""
class RequestTransformer(object):
def __init__(self, request):
self._request = request
def __getattr__(self, attr):
raw_value = getattr(self._request, attr)
if attr in self._request.division.transformers:
transformer = self._request.division.transformers[attr]
return Markup(u'<a href="{link}" target="_blank">'
u'{value} <i class="fa fa-external-link">'
u'</i></a>').format(
link=transformer(raw_value),
value=str(raw_value))
else:
return raw_value
def __iter__(self):
for attr, transformer in\
self._request.division.transformers.items():
if attr == 'ship_type':
yield ('ship', transformer(getattr(self._request,
attr)))
else:
yield (attr, transformer(getattr(self._request, attr)))
return RequestTransformer(self)
def _json(self, extended=False):
try:
parent = super(Request, self)._json(extended)
except AttributeError:
parent = {}
parent[u'href'] = url_for('requests.get_request_details',
request_id=self.id)
attrs = (u'killmail_url', u'kill_timestamp', u'pilot',
u'alliance', u'corporation', u'submitter',
u'division', u'status', u'base_payout', u'payout',
u'details', u'id', u'ship_type', u'system', u'constellation',
u'region')
for attr in attrs:
if attr == u'ship_type':
parent['ship'] = self.ship_type
elif u'payout' in attr:
payout = getattr(self, attr)
parent[attr] = payout.currency()
else:
parent[attr] = getattr(self, attr)
parent[u'submit_timestamp'] = self.timestamp
if extended:
parent[u'actions'] = map(lambda a: a._json(True), self.actions)
parent[u'modifiers'] = map(lambda m: m._json(True), self.modifiers)
parent[u'valid_actions'] = self.valid_actions(current_user)
parent[u'transformed'] = dict(self.transformed)
return parent
# Define event listeners for syncing the various denormalized attributes
@listens_for(Action.type_, 'set')
def _action_type_to_request_status(action, new_status, old_status, initiator):
"""Set the Action's Request's status when the Action's type is changed."""
if action.request is not None and new_status != ActionType.comment:
action.request.status = new_status
@listens_for(Request.actions, 'append')
def _request_status_from_actions(srp_request, action, initiator):
"""Updates Request.status when new Actions are added."""
# Pass when Action.type_ is None, as it'll get updated later
if action.type_ is not None and action.type_ != ActionType.comment:
srp_request.status = action.type_
@listens_for(Request.base_payout, 'set')
def _recalculate_payout_from_request(srp_request, base_payout, *args):
"""Recalculate a Request's payout when the base payout changes."""
if base_payout is None:
base_payout = Decimal(0)
voided = Modifier._voided_select()
modifiers = srp_request.modifiers.join(voided,
voided.c.modifier_id==Modifier.id)\
.filter(~voided.c.voided)\
.order_by(False)
absolute = modifiers.join(AbsoluteModifier).\
with_entities(db.func.sum(AbsoluteModifier.value)).\
scalar()
if not isinstance(absolute, Decimal):
absolute = Decimal(0)
relative = modifiers.join(RelativeModifier).\
with_entities(db.func.sum(RelativeModifier.value)).\
scalar()
if not isinstance(relative, Decimal):
relative = Decimal(0)
payout = (base_payout + absolute) * (Decimal(1) + relative)
srp_request.payout = PrettyDecimal(payout)
@listens_for(Modifier.request, 'set', propagate=True)
@listens_for(Modifier.voided_user, 'set', propagate=True)
def _recalculate_payout_from_modifier(modifier, value, *args):
"""Recalculate a Request's payout when it gains a Modifier or when one of
its Modifiers is voided.
"""
# Force a flush at the beginning, then delay other flushes
db.session.flush()
with db.session.no_autoflush:
# Get the request for this modifier
if isinstance(value, Request):
# Triggered by setting Modifier.request
srp_request = value
else:
# Triggered by setting Modifier.voided_user
srp_request = modifier.request
voided = Modifier._voided_select()
modifiers = srp_request.modifiers.join(voided,
voided.c.modifier_id==Modifier.id)\
.filter(~voided.c.voided)\
.order_by(False)
absolute = modifiers.join(AbsoluteModifier).\
with_entities(db.func.sum(AbsoluteModifier.value)).\
scalar()
if not isinstance(absolute, Decimal):
absolute = Decimal(0)
relative = modifiers.join(RelativeModifier).\
with_entities(db.func.sum(RelativeModifier.value)).\
scalar()
if not isinstance(relative, Decimal):
relative = Decimal(0)
# The modifier that's changed isn't reflected yet in the database, so we
# apply it here.
if isinstance(value, Request):
# A modifier being added to the Request
if modifier.voided:
# The modifier being added is already void
return
direction = Decimal(1)
else:
# A modifier already on a request is being voided
direction = Decimal(-1)
if isinstance(modifier, AbsoluteModifier):
absolute += direction * modifier.value
elif isinstance(modifier, RelativeModifier):
relative += direction * modifier.value
payout = (srp_request.base_payout + absolute) * \
(Decimal(1) + relative)
srp_request.payout = PrettyDecimal(payout)
# The next few lines are responsible for adding a full text search index on the
# Request.details column for MySQL.
_create_fts = DDL('CREATE FULLTEXT INDEX ix_%(table)s_details_fulltext '
'ON %(table)s (details);')
_drop_fts = DDL('DROP INDEX ix_%(table)s_details_fulltext ON %(table)s')
event.listen(
Request.__table__,
'after_create',
_create_fts.execute_if(dialect='mysql')
)
event.listen(
Request.__table__,
'before_drop',
_drop_fts.execute_if(dialect='mysql')
) | PypiClean |
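# --- Illustrative sketch (not part of the original module) ---
# The payout formula applied by the two listeners above, with the SQLAlchemy
# bookkeeping stripped away; only non-voided modifier values should be passed in.
def _example_payout(base_payout, absolute_values, relative_values):
    """payout = (base + sum(absolute)) * (1 + sum(relative))"""
    absolute = sum(absolute_values, Decimal(0))
    relative = sum(relative_values, Decimal(0))
    return (base_payout + absolute) * (Decimal(1) + relative)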
# /Hyperion-0.9.10.tar.gz/Hyperion-0.9.10/hyperion/grid/amr_grid.py
from __future__ import print_function, division
import os
import struct
import hashlib
from copy import deepcopy
import h5py
import numpy as np
from ..util.meshgrid import meshgrid_nd
from ..util.functions import FreezableClass, link_or_copy
from astropy import log as logger
from .grid_helpers import single_grid_dims
def zero_density(grid, xmin=-np.inf, xmax=np.inf, ymin=-np.inf, ymax=np.inf, zmin=-np.inf, zmax=np.inf):
    for ilevel, level in enumerate(grid.levels):
        for igrid, sub_grid in enumerate(level.grids):
            wx = np.linspace(sub_grid.xmin, sub_grid.xmax, sub_grid.nx + 1)
            wy = np.linspace(sub_grid.ymin, sub_grid.ymax, sub_grid.ny + 1)
            wz = np.linspace(sub_grid.zmin, sub_grid.zmax, sub_grid.nz + 1)
            x = 0.5 * (wx[:-1] + wx[1:])
            y = 0.5 * (wy[:-1] + wy[1:])
            z = 0.5 * (wz[:-1] + wz[1:])
            gx, gy, gz = meshgrid_nd(x, y, z)
            reset = (gx < xmin) | (gx > xmax) | (gy < ymin) | (gy > ymax) | (gz < zmin) | (gz > zmax)
            sub_grid.data[reset] = 0.
    return grid
class Grid(FreezableClass):
def __init__(self):
# The 3D data arrays (can have various components)
self.quantities = {}
# The boundaries of the 3D grid in real space
self.xmin, self.xmax = None, None
self.ymin, self.ymax = None, None
self.zmin, self.zmax = None, None
# The dimensions of the array
self.nx, self.ny, self.nz = None, None, None
self._freeze()
def __getattr__(self, attribute):
if attribute == 'shape':
return (self.nz, self.ny, self.nx)
else:
return FreezableClass.__getattribute__(self, attribute)
class Level(FreezableClass):
def __init__(self):
# The list of grids in the level
self.grids = []
self._freeze()
def add_grid(self):
grid = Grid()
self.grids.append(grid)
return grid
class AMRGrid(FreezableClass):
'''
An AMR grid.
Levels are stored in the ``levels`` attribute, which is a list of
:class:`hyperion.grid.amr_grid.Level` objects, which in turn
contain a ``grids`` attribute which is a list of
:class:`~hyperion.grid.amr_grid.Grid` objects.
Levels can be added with::
level = amr.add_level()
And grids can be added to a level with::
grid = level.add_grid()
Grid objects have the following attributes which should be set:
* ``xmin`` - lower x position of the grid
* ``xmax`` - upper x position of the grid
* ``ymin`` - lower y position of the grid
* ``ymax`` - upper y position of the grid
* ``zmin`` - lower z position of the grid
* ``zmax`` - upper z position of the grid
* ``nx`` - number of cells in x direction
* ``ny`` - number of cells in y direction
* ``nz`` - number of cells in z direction
* ``quantities`` - a dictionary containing physical quantities (see below)
:class:`~hyperion.grid.AMRGrid` objects may contain multiple
quantities (e.g. density, specific energy). To access these, you can
specify the name of the quantity as an item::
>>> grid['density']
which is no longer an :class:`~hyperion.grid.AMRGrid` object, but
a :class:`~hyperion.grid.AMRGridView` object. When setting
this for the first time, this can be set either to another
:class:`~hyperion.grid.AMRGridView` object, an external h5py
link, or an empty list. For example, the following should work:
>>> grid['density_new'] = grid['density']
:class:`~hyperion.grid.AMRGridView` objects allow the
specific dust population to be selected as an index:
>>> grid['density'][0]
Which is also an :class:`~hyperion.grid.AMRGridView` object.
'''
def __init__(self, amr_grid=None):
        # Initialize AMR levels
self.levels = []
self._freeze()
# Copy geometry if provided
if amr_grid is not None:
for level in amr_grid.levels:
level_ref = self.add_level()
for grid in level.grids:
grid_ref = level_ref.add_grid()
grid_ref.nx = grid.nx
grid_ref.ny = grid.ny
grid_ref.nz = grid.nz
grid_ref.xmin, grid_ref.xmax = grid.xmin, grid.xmax
grid_ref.ymin, grid_ref.ymax = grid.ymin, grid.ymax
grid_ref.zmin, grid_ref.zmax = grid.zmin, grid.zmax
grid_ref.quantities = {}
def remove_level(self, level_id):
self.levels.pop(level_id)
def add_level(self):
level = Level()
self.levels.append(level)
return level
def __getattr__(self, attribute):
if attribute == 'n_dust':
n_dust = None
for level in self.levels:
for grid in level.grids:
for quantity in grid.quantities:
n_dust_q, shape_q = single_grid_dims(grid.quantities[quantity])
if n_dust is None:
n_dust = n_dust_q
elif n_dust_q is not None:
if n_dust != n_dust_q:
raise ValueError("Not all dust lists in the grid have the same size")
return n_dust
else:
return FreezableClass.__getattribute__(self, attribute)
def _check_array_dimensions(self, amr_grid=None):
'''
Check that a grid's array dimensions agree with this grid's metadata
Parameters
----------
amr_grid : AMR grid, optional
The AMR grid for which to test the array dimensions. If this is not
specified, this method performs a self-consistency check of array
dimensions and meta-data.
'''
# If no grid is specified, do a self-consistency check
if amr_grid is None:
amr_grid = self
n_pop_ref = None
# Loop over levels
for ilevel, level_ref in enumerate(self.levels):
# Read in level
level = amr_grid.levels[ilevel]
# Loop over grids
for igrid, grid_ref in enumerate(level_ref.grids):
# Read in grid
grid = level.grids[igrid]
# Loop over quantities
for quantity in grid.quantities:
n_pop, shape = single_grid_dims(grid.quantities[quantity])
if shape != grid_ref.shape:
raise ValueError("Quantity arrays do not have the right "
"dimensions: %s instead of %s"
% (shape, grid_ref.shape))
if n_pop is not None:
if n_pop_ref is None:
n_pop_ref = n_pop
elif n_pop != n_pop_ref:
raise ValueError("Not all dust lists in the grid have the same size")
def read(self, group, quantities='all'):
'''
Read the geometry and physical quantities from an AMR grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid from. This group should contain
groups named 'Geometry' and 'Quantities'.
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in geometry
self.read_geometry(group['Geometry'])
# Read in physical quantities
self.read_quantities(group['Quantities'], quantities=quantities)
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def read_geometry(self, group):
'''
Read in geometry information from an AMR grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the geometry from
'''
# Check that grid is indeed AMR
if group.attrs['grid_type'].decode('utf-8') != 'amr':
raise Exception("Grid is not an AMR grid")
# Initialize levels list
self.levels = []
# Loop over levels
for ilevel in range(group.attrs['nlevels']):
# Read in level
level_path = 'level_%05i' % (ilevel + 1)
g_level = group[level_path]
# Initialize level
level = self.add_level()
# Loop over grids
for igrid in range(g_level.attrs['ngrids']):
# Read in grid
grid_path = 'grid_%05i' % (igrid + 1)
g_grid = g_level[grid_path]
# Initialize grid
grid = level.add_grid()
# Retrieve real-world grid boundaries
grid.xmin = g_grid.attrs['xmin']
grid.xmax = g_grid.attrs['xmax']
grid.ymin = g_grid.attrs['ymin']
grid.ymax = g_grid.attrs['ymax']
grid.zmin = g_grid.attrs['zmin']
grid.zmax = g_grid.attrs['zmax']
# Retrieve grid dimensions
grid.nx = int(g_grid.attrs['n1'])
grid.ny = int(g_grid.attrs['n2'])
grid.nz = int(g_grid.attrs['n3'])
# Check that advertised hash matches real hash
if group.attrs['geometry'].decode('utf-8') != self.get_geometry_id():
raise Exception("Calculated geometry hash does not match hash in file")
def read_quantities(self, group, quantities='all'):
'''
Read in physical quantities from an AMR grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid quantities from
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Loop over levels
for ilevel, level in enumerate(self.levels):
# Read in level
level_path = 'level_%05i' % (ilevel + 1)
# Loop over grids
for igrid, grid in enumerate(level.grids):
# Read in grid
grid_path = 'grid_%05i' % (igrid + 1)
# Read in desired quantities
g_grid_quantities = group[level_path][grid_path]
for quantity in g_grid_quantities:
if quantities == 'all' or quantity in quantities:
array = np.array(g_grid_quantities[quantity])
if array.ndim == 4: # if array is 4D, it is a list of 3D arrays
grid.quantities[quantity] = [array[i] for i in range(array.shape[0])]
else:
grid.quantities[quantity] = array
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float):
'''
Write out the AMR grid
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
quantities : 'all' or list
Which physical quantities to write out. Use 'all' to write out all
quantities or a list of strings to write only specific quantities.
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
wall_dtype : type
The datatype to use to write the wall positions
physics_dtype : type
The datatype to use to write the physical quantities
'''
# Create HDF5 groups if needed
if 'Geometry' not in group:
g_geometry = group.create_group('Geometry')
else:
g_geometry = group['Geometry']
if 'Quantities' not in group:
g_quantities = group.create_group('Quantities')
else:
g_quantities = group['Quantities']
g_geometry.attrs['grid_type'] = np.string_('amr'.encode('utf-8'))
g_geometry.attrs['nlevels'] = len(self.levels)
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
# Write out physical quantities
# Loop over levels
for ilevel, level in enumerate(self.levels):
# Read in level
level_path = 'level_%05i' % (ilevel + 1)
g_level = g_geometry.create_group(level_path)
q_level = g_quantities.create_group(level_path)
g_level.attrs['ngrids'] = len(level.grids)
# Loop over grids
for igrid, grid in enumerate(level.grids):
# Read in grid
grid_path = 'grid_%05i' % (igrid + 1)
g_grid = g_level.create_group(grid_path)
q_grid = q_level.create_group(grid_path)
# Write real-world grid boundaries
g_grid.attrs['xmin'] = grid.xmin
g_grid.attrs['xmax'] = grid.xmax
g_grid.attrs['ymin'] = grid.ymin
g_grid.attrs['ymax'] = grid.ymax
g_grid.attrs['zmin'] = grid.zmin
g_grid.attrs['zmax'] = grid.zmax
                # Write grid dimensions
g_grid.attrs['n1'] = grid.nx
g_grid.attrs['n2'] = grid.ny
g_grid.attrs['n3'] = grid.nz
# Write out physical quantities
for quantity in grid.quantities:
if quantities == 'all' or quantity in quantities:
if isinstance(grid.quantities[quantity], h5py.ExternalLink):
link_or_copy(q_grid, quantity, grid.quantities[quantity], copy, absolute_paths=absolute_paths)
else:
q_grid.create_dataset(quantity, data=grid.quantities[quantity],
compression=compression,
dtype=physics_dtype)
g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def write_single_array(self, group, name, amr_grid, copy=True, absolute_paths=False, compression=True, physics_dtype=float):
'''
Write out a single quantity, checking for consistency with geometry
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
name : str
The name of the array in the group
amr_grid : AMRGridView
The array to write out
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
physics_dtype : type
The datatype to use to write the physical quantities
'''
if not isinstance(amr_grid, AMRGridView):
raise ValueError("amr_grid should be an AMRGridView instance")
# Loop over levels
for ilevel, level in enumerate(self.levels):
# Read in level
level_path = 'level_%05i' % (ilevel + 1)
if level_path in group:
q_level = group[level_path]
else:
q_level = group.create_group(level_path)
# Loop over grids
for igrid, grid in enumerate(level.grids):
# Read in grid
grid_path = 'grid_%05i' % (igrid + 1)
if grid_path in q_level:
q_grid = q_level[grid_path]
else:
q_grid = q_level.create_group(grid_path)
# Write out physical quantities
array = amr_grid.levels[ilevel].grids[igrid].quantities[amr_grid.viewed_quantity]
if isinstance(array, h5py.ExternalLink):
link_or_copy(q_grid, name, array, copy, absolute_paths=absolute_paths)
else:
q_grid.create_dataset(name, data=array,
compression=compression,
dtype=physics_dtype)
def get_geometry_id(self):
geo_hash = hashlib.md5()
for level in self.levels:
for grid in level.grids:
geo_hash.update(struct.pack('>d', grid.xmin))
geo_hash.update(struct.pack('>d', grid.xmax))
geo_hash.update(struct.pack('>d', grid.ymin))
geo_hash.update(struct.pack('>d', grid.ymax))
geo_hash.update(struct.pack('>d', grid.zmin))
geo_hash.update(struct.pack('>d', grid.zmax))
geo_hash.update(struct.pack('>q', grid.nx))
geo_hash.update(struct.pack('>q', grid.ny))
geo_hash.update(struct.pack('>q', grid.nz))
return geo_hash.hexdigest()
def __getitem__(self, item):
return AMRGridView(self, item)
def __setitem__(self, item, value):
if isinstance(value, AMRGridView):
if self.levels == [] and value.levels != []:
logger.warning("No geometry in target grid - copying from original grid")
for level in value.levels:
level_ref = self.add_level()
for grid in level.grids:
grid_ref = level_ref.add_grid()
grid_ref.nx = grid.nx
grid_ref.ny = grid.ny
grid_ref.nz = grid.nz
grid_ref.xmin, grid_ref.xmax = grid.xmin, grid.xmax
grid_ref.ymin, grid_ref.ymax = grid.ymin, grid.ymax
grid_ref.zmin, grid_ref.zmax = grid.zmin, grid.zmax
grid_ref.quantities = {}
for ilevel, level_ref in enumerate(self.levels):
level = value.levels[ilevel]
for igrid, grid_ref in enumerate(level_ref.grids):
grid = level.grids[igrid]
grid_ref.quantities[item] = deepcopy(grid.quantities[value.viewed_quantity])
elif isinstance(value, h5py.ExternalLink):
filename = value.filename
base_path = os.path.dirname(value.path)
array_name = os.path.basename(value.path)
for ilevel, level_ref in enumerate(self.levels):
level_path = 'level_%05i' % (ilevel + 1)
for igrid, grid_ref in enumerate(level_ref.grids):
                    grid_path = 'grid_%05i' % (igrid + 1)
grid_ref.quantities[item] = h5py.ExternalLink(filename, os.path.join(base_path, level_path, grid_path, array_name))
elif value == []:
for level in self.levels:
for grid in level.grids:
grid.quantities[item] = []
else:
raise ValueError('value should be an empty list or an AMRGridView instance')
def __contains__(self, item):
if len(self.levels) > 0:
if len(self.levels[0].grids) > 0:
return item in self.levels[0].grids[0].quantities
else:
return False
else:
return False
def reset_quantities(self):
self.quantities = {}
for level in self.levels:
for grid in level.grids:
grid.quantities = {}
def add_derived_quantity(self, name, function):
for level in self.levels:
for grid in level.grids:
if name in grid.quantities:
raise KeyError(name + ' already exists')
function(grid.quantities)
def to_yt(self, dust_id=0):
'''
Convert AMR grid to a yt object (requires yt)
Parameters
----------
dust_id : int, optional
The ID of the dust population to extract. If not set, this
defaults to 0 (the first dust population).
'''
from .yt_wrappers import amr_grid_to_yt_stream
return amr_grid_to_yt_stream(self.levels, dust_id)
@classmethod
def from_yt(cls, ds, quantity_mapping={}):
"""
Convert a yt dataset to a Hyperion AMRGrid object
.. note:: This method requires yt 3.0 or later
Parameters
----------
ds : yt Dataset
The yt dataset
quantity_mapping : dict
A dictionary mapping the name of the quantity to use in Hyperion (the
key) to the name of the field to extract in yt (the value).
Notes
-----
The domain is always re-centered so that the position at
ds.domain_center in yt becomes the origin in Hyperion.
Examples
--------
Assuming that your dust opacities are defined per unit gas mass, and the
simulation density is given in gas densities, converting is
        straightforward (in this case we assume the density field is called
``('gas', 'density')``)::
>>> from yt import load
>>> from hyperion.grid import AMRGrid
>>> ds = load('DD0010/moving7_0010')
>>> amr = AMRGrid.from_yt(ds, quantity_mapping={'density':('gas', 'density')})
However, you will need to take care if your dust opacities are defined
in dust mass units. If the yt dataset does not contain dust densities,
you can add a field yourself, for example::
>>> from yt import load
>>> from hyperion.grid import AMRGrid
>>> ds = load('DD0010/moving7_0010')
>>> def _dust_density(field, data):
... return data[('gas', 'density')].in_units('g/cm**3') * 0.01
>>> ds.add_field(('gas', 'dust_density'), function=_dust_density, units='g/cm**3')
>>> amr = AMRGrid.from_yt(ds, quantity_mapping={'density':('gas', 'dust_density')})
"""
import yt
from distutils.version import LooseVersion
if not LooseVersion(yt.__version__) >= LooseVersion('3'):
raise ImportError("yt 3.0 or later is required")
from .yt_wrappers import yt_dataset_to_amr_grid
return yt_dataset_to_amr_grid(ds, quantity_mapping=quantity_mapping)
class AMRGridView(AMRGrid):
def __init__(self, amr_grid, quantity):
self.viewed_quantity = quantity
AMRGrid.__init__(self)
for level_ref in amr_grid.levels:
level = self.add_level()
for grid_ref in level_ref.grids:
grid = level.add_grid()
grid.nx = grid_ref.nx
grid.ny = grid_ref.ny
grid.nz = grid_ref.nz
grid.xmin, grid.xmax = grid_ref.xmin, grid_ref.xmax
grid.ymin, grid.ymax = grid_ref.ymin, grid_ref.ymax
grid.zmin, grid.zmax = grid_ref.zmin, grid_ref.zmax
grid.quantities = {}
grid.quantities[quantity] = grid_ref.quantities[quantity]
def append(self, amr_grid_view):
'''
Used to append quantities from another grid
Parameters
----------
amr_grid : AMRGridView instance
The grid to copy the quantity from
'''
if not isinstance(amr_grid_view, AMRGridView):
raise ValueError("amr_grid_view should be an AMRGridView instance")
self._check_array_dimensions(amr_grid_view[amr_grid_view.viewed_quantity])
for ilevel, level_ref in enumerate(self.levels):
level = amr_grid_view.levels[ilevel]
for igrid, grid_ref in enumerate(level_ref.grids):
grid = level.grids[igrid]
if grid_ref.quantities[self.viewed_quantity] is grid.quantities[amr_grid_view.viewed_quantity]:
raise Exception("Calling append recursively")
if type(grid.quantities[amr_grid_view.viewed_quantity]) is list:
raise Exception("Can only append a single grid")
grid_ref.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[amr_grid_view.viewed_quantity]))
def add(self, amr_grid_view):
'''
Used to add quantities from another grid
Parameters
----------
amr_grid : AMRGridView instance
The grid to copy the quantity from
'''
if not isinstance(amr_grid_view, AMRGridView):
raise ValueError("amr_grid_view should be an AMRGridView instance")
self._check_array_dimensions(amr_grid_view[amr_grid_view.viewed_quantity])
for ilevel, level_ref in enumerate(self.levels):
level = amr_grid_view.levels[ilevel]
for igrid, grid_ref in enumerate(level_ref.grids):
grid = level.grids[igrid]
if type(grid_ref.quantities[self.viewed_quantity]) is list:
raise Exception("need to first specify the item to add to")
if type(grid.quantities[amr_grid_view.viewed_quantity]) is list:
raise Exception("need to first specify the item to add")
grid_ref.quantities[self.viewed_quantity] += grid.quantities[amr_grid_view.viewed_quantity]
def __getitem__(self, item):
if type(item) is int:
amr_grid = AMRGridView(self, self.viewed_quantity)
for level in amr_grid.levels:
for grid in level.grids:
grid.quantities = {amr_grid.viewed_quantity: grid.quantities[amr_grid.viewed_quantity][item]}
return amr_grid
else:
return AMRGrid.__getitem__(self, item) | PypiClean |
/GloboNetworkAPI-0.9.6.tar.gz/GloboNetworkAPI-0.9.6/networkapiclient/ApiIPv4.py | from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiIPv4(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None, request_context=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(ApiIPv4, self).__init__(
networkapi_url,
user,
password,
user_ldap,
request_context
)
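    # Usage sketch (illustrative only, not part of the original client). The URL,
    # credentials and ids below are placeholders, and the exact shape of the 'search'
    # dict follows GloboNetworkAPI v3 conventions that are not reproduced here.
    #
    #   client = ApiIPv4('https://networkapi.example.com/', 'user', 'password')
    #   some_ips = client.get([1, 2, 3], kind='basic')   # fetch ipv4 objects by id
    #   client.delete([4])                               # remove an ipv4 by id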
def search(self, **kwargs):
"""
        Method to search ipv4's based on extended search.
:param search: Dict containing QuerySets to find ipv4's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
"""
return super(ApiIPv4, self).get(self.prepare_url('api/v3/ipv4/',
kwargs))
def get(self, ids, **kwargs):
"""
Method to get ipv4's by their ids
:param ids: List containing identifiers of ipv4's
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
"""
url = build_uri_with_ids('api/v3/ipv4/%s/', ids)
return super(ApiIPv4, self).get(self.prepare_url(url, kwargs))
def delete(self, ids):
"""
Method to delete ipv4's by their ids
:param ids: Identifiers of ipv4's
:return: None
"""
url = build_uri_with_ids('api/v3/ipv4/%s/', ids)
return super(ApiIPv4, self).delete(url)
def update(self, ipv4s):
"""
Method to update ipv4's
        :param ipv4s: List containing ipv4's desired to be updated
:return: None
"""
data = {'ips': ipv4s}
ipv4s_ids = [str(ipv4.get('id')) for ipv4 in ipv4s]
return super(ApiIPv4, self).put('api/v3/ipv4/%s/' %
';'.join(ipv4s_ids), data)
def create(self, ipv4s):
"""
Method to create ipv4's
        :param ipv4s: List containing ipv4's desired to be created in the database
:return: None
"""
data = {'ips': ipv4s}
return super(ApiIPv4, self).post('api/v3/ipv4/', data) | PypiClean |
/Ideal%20Engine-1.0.2.tar.gz/Ideal Engine-1.0.2/engine/pickle.py | import sys
import argparse
from time import sleep
import json
import pickle
from os import path
def getData():
with open('title.txt', 'rb') as readTitle:
titleList = pickle.load(readTitle)
readTitle.close()
with open('bio.txt', 'rb') as readBio:
bioList = pickle.load(readBio)
readBio.close()
print(titleList)
return titleList, bioList
def display():
if(path.exists("title.txt")):
title, bio = getData()
print("Retrieving Data ...")
sleep(1)
print(title)
for _ in range(len(title)):
print(str(_) + ". Title: " + title[_])
print(str(_) + ". Details: " + bio[_])
else:
print("File does not exist. create new entry.")
def newEntry():
if(path.exists("title.txt")):
title, bio = getData()
else:
title = []
bio = []
name = input("Enter project name: ")
detail = input("Enter project description: ")
msg = sys.stdin.readlines()
desc = '\t'.join(msg)
print("Writing to files ... ")
sleep(1)
title.append(name)
bio.append(desc)
with open("title.txt", "ab+") as header:
pickle.dump(title, header)
header.close()
with open("bio.txt", "ab+") as footer:
pickle.dump(bio, footer)
footer.close()
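# Usage sketch (not part of the original script), assuming it is run directly:
#
#   python pickle.py --new       # add an entry; finish the description with EOF (Ctrl-D)
#   python pickle.py --display   # list the stored entries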
parser = argparse.ArgumentParser(description="Commands to add/display projects.")
#parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
#parser_display = subparsers.add_parser('display', help='Display entries')
#parser_display.set_defaults(func=display)
#parser_new = subparsers.add_parser('new', help='Add new entries')
#parser_new.set_defaults(func=newEntry)
parser.add_argument("-v", "--version", help="show program version", action="store_true")
parser.add_argument("-n", "--new", help='Add new entries', action="store_true")
parser.add_argument("-d", "--display", help='Display entries', action="store_true")
if len(sys.argv) <= 1:
sys.argv.append('--help')
options = parser.parse_args()
# Run the appropriate function (in this case display or new)
#options.func()
if options.version:
print("This is myprogram version 0.1")
elif options.new:
newEntry()
elif options.display:
display() | PypiClean |
/Naughty_and_Nice-7.7.1-py3-none-any.whl/naughty_and_nice/naughtyandnice.py | import tweepy
import json
import re
import string
import random
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
##SET UP TWITTER
#change this:
with open('/home/pi/Documents/twitter_auth.json') as f:
keys = json.load(f)
# -----------
## Or On Windows ##
#with open (r'C:\Users\username\path_to\twitter_auth.json') as f:
# keys = json.load(f)
consumer_key = keys['consumer_key']
consumer_secret = keys['consumer_secret']
access_token = keys['access_token']
access_token_secret = keys['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
##TWITTER STREAM
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
tweets.append(status.text.rstrip())
print(status.text.rstrip())
if len(tweets) > int(howManyTweets):
myStream.disconnect()
class naughty_and_nice():
def __init__(self, tweets):
#myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
##EMOJI DATA
self.pos_emojis = [chr(uni) for uni in [128537, 10084, 128525, 128147, 128535, 9786, 128522, 128539, 128149, 128512, 128515, 128538]]
self.neg_emojis = [chr(uni) for uni in [9785, 128533, 128553, 128530, 128544, 128528, 128550, 128547, 128555, 128534, 128542, 128148, 128546, 128543]]
self.all_emojis = self.pos_emojis + self.neg_emojis
##FETCH SOME TWEETS
myStream.filter(track=self.all_emojis, languages=['en'])
##MAKE SELF.TWEETS THE SAME AS TWEETS
self.tweets = tweets
##RUN THE BOT
self.bot()
##GENERATE EXIT CODE
self.exit_code = random.randint(100,1000)
print("PERFECT RUN! EXIT CODE: " + str(self.exit_code))
print("Done")
##STORE TWEETS
def store_tweets(self, file, tweets):
with open(self.file, 'w') as f:
f.writelines("Your next run")
with open(self.file, 'r') as f:
self.old_tweets = f.readlines()
self.all_tweets = self.old_tweets + self.tweets
self.all_tweets = list(set(self.all_tweets))
self.all_tweets = [self.tweet.replace('\n','')+"\n" for self.tweet in self.all_tweets]
with open(self.file, 'w') as f:
f.writelines(self.all_tweets)
return self.all_tweets
##CLEAN TWEETS
def clean_tweets(self, tweets):
self.tweets = [self.tweet.rstrip() for self.tweet in self.tweets]
self.tweets = [re.sub(r'http\S+', '', self.tweet) for self.tweet in self.tweets]
self.tweets = [re.sub(r'@\S+', '', self.tweet) for self.tweet in self.tweets]
self.tweets = [self.tweet.translate({ord(char): '' for char in string.punctuation}) for self.tweet in self.tweets]
return self.tweets
##SORT TWEETS
def sort_tweets(self, tweets):
self.positive_tweets = [self.tweet for self.tweet in self.tweets if set(self.tweet) & set(self.pos_emojis)]
self.negative_tweets = [self.tweet for self.tweet in self.tweets if set(self.tweet) & set(self.neg_emojis)]
self.positive_tweets = [re.sub(r'[^\x00-\x7F]+','', self.tweet) for self.tweet in self.positive_tweets]
self.negative_tweets = [re.sub(r'[^\x00-\x7F]+','', self.tweet) for self.tweet in self.negative_tweets]
return self.positive_tweets, self.negative_tweets
##PARSE TWEETS
def parse_tweets(self, words):
self.words = words.lower()
self.words = word_tokenize(self.words)
self.words = [self.word for self.word in self.words if self.word not in stopwords.words("english")]
self.word_dictionary = dict([(self.word, True) for self.word in self.words])
return self.word_dictionary
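    # Illustration (not part of the original code): parse_tweets() returns the NLTK
    # "featureset" dict form, e.g. the made-up tweet "Loving this sunny day" becomes
    # {'loving': True, 'sunny': True, 'day': True} after lowercasing, tokenising and
    # stopword removal; train_classifier() pairs these dicts with labels before
    # handing them to NaiveBayesClassifier.train().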
##TRAIN THE CLASSIFIER
def train_classifier(self, positive_tweets, negative_tweets):
self.positive_tweets = [(self.parse_tweets(self.tweet),'positive') for self.tweet in self.positive_tweets]
self.negative_tweets = [(self.parse_tweets(self.tweet),'negative') for self.tweet in self.negative_tweets]
self.fraction_pos = round(len(self.positive_tweets) * 0.8)
self.fraction_neg = round(len(self.negative_tweets) * 0.8)
self.train_set = self.negative_tweets[:self.fraction_pos] + self.positive_tweets[:self.fraction_pos]
self.test_set = self.negative_tweets[self.fraction_neg:] + self.positive_tweets[self.fraction_neg:]
self.classifier = NaiveBayesClassifier.train(self.train_set)
self.accuracy = nltk.classify.util.accuracy(self.classifier, self.test_set)
return self.classifier, self.accuracy
##CALCULATING NAUGHTINESS
def calculate_naughty(self, classifier, accuracy, user):
self.user_tweets = api.user_timeline(screen_name = self.user, count=200)
self.user_tweets = [self.tweet.text for self.tweet in self.user_tweets]
self.user_tweets = self.clean_tweets(self.user_tweets)
self.rating = [self.classifier.classify(self.parse_tweets(self.tweet)) for self.tweet in self.user_tweets]
self.percent_naughty = self.rating.count('negative') / len(self.rating)
if self.percent_naughty > 0.5:
print(self.user, "is", self.percent_naughty * 100, "percent NAUGHTY with an accuracy of", self.accuracy * 100)
self.naughtiness = self.user, "is", self.percent_naughty * 100, "percent NAUGHTY with an accuracy of", self.accuracy * 100
else:
print(self.user, "is", 100 - (self.percent_naughty * 100), "percent NICE with an accuracy of", self.accuracy * 100)
self.naughtiness = self.user, "is", 100 - (self.percent_naughty * 100), "percent NICE with an accuracy of", self.accuracy * 100
return self.naughtiness
##EXECUTE THE PROGRAM AND THE BOT/SHELL
def bot(self):
print("You have entered the naughty and nice shell. Type \"Help\" for more info")
while True:
comm = input("> ")
#if the command is "Help"
if comm == "Help":
print("Help: Type \"Help\" to get this menu. Type \"Store Tweets\" to store the tweets you have collected ", end="")
print("in a file called tweets.txt. Type \"Calculate Naughtiness\" to calculate the naughtiness of one users's twitter profile. You will be prompted to give the user's username. Type \"Exit\" to exit the program.")
#if the command is "Store Tweets"
elif comm == "Store Tweets":
print("Storing Tweets...")
self.file = 'tweets.txt'
self.tweets = self.store_tweets(self.file, self.tweets)
print("Done")
#if the command is "Calculate Naughtiness"
elif comm == "Calculate Naughtiness":
print("Cleaning Tweets...")
self.tweets = self.clean_tweets(self.tweets)
print("Done")
print("Sorting Tweets...")
self.pos_tweets, self.neg_tweets = self.sort_tweets(self.tweets)
print("Done")
print("Training Classifier...")
self.classifier, self.accuracy = self.train_classifier(self.pos_tweets, self.neg_tweets)
print("Done")
self.user = input("Which user's account do you want to calculate their naughtiness? Enter a username. ")
print("Calculating Naughtiness from " + self.user + "...")
msg = self.calculate_naughty(self.classifier, self.accuracy, self.user)
print("Done")
ans = input("Do you want to tweet this person's naughtiness on twitter? Y/n ")
if ans == "Y" or ans == "y":
self.msg = str(msg)
print("Tweeting " + self.msg)
api.update_status(self.msg)
else:
print("Ok")
#if the command is "Exit"
elif comm == "Exit":
print("Exiting...")
break
#Else
else:
print("That is not a command!")
while True:
howManyTweets = input("How many tweets do you want? ")
try:
howManyTweets = int(howManyTweets)
break
except Exception as e:
print(str(e) + " . Please try again!")
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
tweets = []
naughty_and_nice(tweets) | PypiClean |
/Kamaelia-0.6.0.tar.gz/Kamaelia-0.6.0/Tools/Whiteboard/WhiteboardPlayer.py |
# whiteboard player
# plays back a previously recorded whiteboard stream, stripping the timestamps and
# replaying each item at its original time offset
import Axon
from Axon.Component import component
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines
from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists as text_to_tokenlists
from Kamaelia.Apps.Whiteboard.Tokenisation import tokenlists_to_lines, lines_to_tokenlists
import sys
from Kamaelia.Apps.Whiteboard.Entuple import Entuple
class Timestamp(component):
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (producerFinished, shutdownMicroprocess)):
return True
return False
def main(self):
import time
start=time.time()
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
msg = str(time.time()-start) + " " + data
self.send( msg, "outbox" )
self.pause()
yield 1
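# Illustration (not part of the original file): Timestamp prefixes every message with the
# number of seconds since the component started, so a recorded line looks roughly like
# "2.7351 <original whiteboard message>". DeTimestamp below splits that prefix off with
# msg.split(" ", 1) and replays the payload at the recorded time offset.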
class DeTimestamp(component):
Outboxes = { "outbox" : "Detimestamped string data",
"signal" : "Shutdown signalling",
"next" : "Requests for more timestamped data (number of items needed)",
}
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, (producerFinished, shutdownMicroprocess)):
return msg
return False
def main(self):
import time
start=None
waiting = []
shuttingdown=False
BUFFERSIZE=10
self.send(BUFFERSIZE, "next")
while not shuttingdown or waiting or self.dataReady("inbox"):
shuttingdown = shuttingdown or self.shutdown()
if self.dataReady("inbox"):
msg = self.recv("inbox")
when, data = msg.split(" ",1)
if start==None:
start=time.time()
when = start+ float(when)
waiting.append( (when,data) )
sentcount=0
while waiting and waiting[0][0] <= time.time():
when, data = waiting.pop(0)
self.send(data,"outbox")
sentcount+=1
if sentcount:
self.send(sentcount, "next")
if not waiting and not shuttingdown and not self.dataReady("inbox"):
self.pause()
yield 1
self.send(shuttingdown,"signal")
class IntersperseNewlines(component):
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (producerFinished, shutdownMicroprocess)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
self.send( data, "outbox" )
self.send("\n", "outbox" )
self.pause()
yield 1
if __name__=="__main__":
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.File.Reading import PromptedFileReader
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.Apps.Whiteboard.SingleShot import OneShot
try:
if "--help" in sys.argv:
sys.stderr.write("Usage:\n ./WhiteboardPlayer.py filename host port\n\n")
sys.exit(0)
filename = sys.argv[1]
rhost = sys.argv[2]
rport = int(sys.argv[3])
except:
sys.stderr.write("Usage:\n ./WhiteboardPlayer.py filename host port\n\n")
sys.exit(1)
print "Playing..."
Pipeline(
Graphline(
FILEREADER = PromptedFileReader(filename, "lines"),
DETIMESTAMP = DeTimestamp(),
linkages = {
# data from file gets detimestamped and sent on
("FILEREADER", "outbox") : ("DETIMESTAMP", "inbox"),
("DETIMESTAMP", "outbox") : ("", "outbox"),
# detimestamper asks for more data to be read from file
("DETIMESTAMP", "next") : ("FILEREADER", "inbox"),
# shutdown wiring
("", "control") : ("FILEREADER", "control"),
("FILEREADER", "signal") : ("DETIMESTAMP", "control"),
("DETIMESTAMP", "signal") : ("", "signal"),
}
),
TCPClient(host=rhost, port=rport),
).run() | PypiClean |
/FuzzyClassificator-1.3.84-py3-none-any.whl/pybrain/datasets/reinforcement.py | __author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.datasets.sequential import SequentialDataSet
from pybrain.datasets.dataset import DataSet
from scipy import zeros
class ReinforcementDataSet(SequentialDataSet):
def __init__(self, statedim, actiondim):
""" initialize the reinforcement dataset, add the 3 fields state, action and
            reward, and create an index marker. This class is basically a wrapper
that renames the fields of SupervisedDataSet into the more common reinforcement
learning names. Instead of 'episodes' though, we deal with 'sequences' here. """
DataSet.__init__(self)
# add 3 fields: input, target, importance
self.addField('state', statedim)
self.addField('action', actiondim)
self.addField('reward', 1)
# link these 3 fields
self.linkFields(['state', 'action', 'reward'])
# reset the index marker
self.index = 0
# add field that stores the beginning of a new episode
self.addField('sequence_index', 1)
self.append('sequence_index', 0)
self.currentSeq = 0
self.statedim = statedim
self.actiondim = actiondim
# the input and target dimensions (for compatibility)
self.indim = self.statedim
self.outdim = self.actiondim
def addSample(self, state, action, reward):
""" adds a new sample consisting of state, action, reward.
:key state: the current state of the world
:key action: the executed action by the agent
:key reward: the reward received for action in state """
self.appendLinked(state, action, reward)
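    # Usage sketch (illustrative, not from the original module); the dimensions and
    # values below are arbitrary placeholders.
    #
    #   ds = ReinforcementDataSet(statedim=4, actiondim=1)
    #   ds.newSequence()                            # start an episode (SequentialDataSet API)
    #   ds.addSample([0.1, 0.0, 0.3, 0.2], [1], 0.5)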
def getSumOverSequences(self, field):
sums = zeros((self.getNumSequences(), self.getDimension(field)))
for n in range(self.getNumSequences()):
sums[n, :] = sum(self._getSequenceField(n, field), 0)
return sums
def __reduce__(self):
# FIXME: This does actually not feel right: We have to use the DataSet
# method here, although we inherit from sequential dataset.
_, _, state, _, _ = DataSet.__reduce__(self)
creator = self.__class__
args = self.statedim, self.actiondim
return creator, args, state, iter([]), iter({}) | PypiClean |
/ECRScan-0.2.2.tar.gz/ECRScan-0.2.2/ecrscan/command.py | import sys
import logging
import click
from ecrscan.utility import init_boto3_clients
from ecrscan.utility import get_results
from ecrscan.utility import scan_image
logging.basicConfig(
level=logging.INFO,
format='[%(levelname)s] %(asctime)s (%(module)s) %(message)s',
datefmt='%Y/%m/%d-%H:%M:%S'
)
SERVICES = ['ecr']
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@click.group()
@click.version_option(version='0.2.2')
def cli():
'''
A utility for working with ECR image scan results
'''
pass
@cli.command()
@click.option('--repository', '-r', help='ECR repository of interest', required=True)
@click.option('--registry-id', help='ECR registry ID of interest')
@click.option('--tag', '-t', help='ECR repository tag of interest', required=True)
@click.option('--profile', help='AWS credential config')
@click.option('--region', help='AWS region')
@click.option('--ignore-errors', help='Ignore errors, always exit 0', is_flag=True)
def rescan(repository, tag, registry_id, profile, region, ignore_errors):
'''
    This is an entry-point for the utility to (re)scan an existing image.
'''
try:
services = init_boto3_clients(SERVICES, profile, region)
ecr_client = services.get('ecr')
if ecr_client is None:
logger.error('could not get an ECR client')
sys.exit(1)
if scan_image(ecr_client, repository, tag, registry_id):
sys.exit(0)
else:
if ignore_errors:
logger.info('reasons to panic ignored')
sys.exit(0)
else:
sys.exit(1)
except Exception as wartburg_track_and_field:
logger.error(wartburg_track_and_field, exc_info=False)
sys.exit(2)
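# Usage sketch (illustrative only), assuming the package installs this click group as a
# console script named "ecrscan"; the script name, repository and tag are placeholders.
#
#   ecrscan rescan --repository my-repo --tag latest --region us-east-1
#   ecrscan report --repository my-repo --tag latest --profile default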
@cli.command()
@click.option('--repository', '-r', help='ECR repository of interest', required=True)
@click.option('--registry-id', help='ECR registry ID of interest')
@click.option('--tag', '-t', help='ECR repository tag of interest', required=True)
@click.option('--profile', help='AWS credential config')
@click.option('--region', help='AWS region')
@click.option('--ignore-errors', help='Ignore errors, always exit 0', is_flag=True)
def report(repository, tag, registry_id, profile, region, ignore_errors):
'''
    This is an entry-point for the utility to report scan results.
'''
try:
services = init_boto3_clients(SERVICES, profile, region)
ecr_client = services.get('ecr')
if ecr_client is None:
logger.error('could not get an ECR client')
sys.exit(1)
if get_results(ecr_client, repository, tag, registry_id):
sys.exit(0)
else:
if ignore_errors:
logger.info('reasons to panic ignored')
sys.exit(0)
else:
sys.exit(1)
except Exception as wartburg_track_and_field:
logger.error(wartburg_track_and_field, exc_info=False)
sys.exit(2) | PypiClean |
/KratosSwimmingDEMApplication-9.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/SwimmingDEMApplication/apply_porosity_solution_body_force_process.py | import KratosMultiphysics
import KratosMultiphysics.SwimmingDEMApplication as KratosSDEM
def Factory(settings, Model):
if not isinstance(settings, KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyPorositySolutionTransientBodyForceProcess(Model, settings["Parameters"])
## All the Python processes should be derived from "Process"
class ApplyPorositySolutionTransientBodyForceProcess(KratosMultiphysics.Process):
def __init__(self, model, settings):
"""The default constructor of the class.
Keyword arguments:
self -- It signifies an instance of a class.
model -- the container of the fluid model part.
settings -- Kratos parameters containing process settings.
"""
KratosMultiphysics.Process.__init__(self)
default_settings = KratosMultiphysics.Parameters("""
{
"model_part_name" : "please_specify_model_part_name",
"variable_name" : "BODY_FORCE",
"benchmark_name" : "custom_body_force.vortex",
"benchmark_parameters" : {},
"compute_nodal_error" : true,
"print_convergence_output" : false,
"output_parameters" : {}
}
"""
)
self.settings = settings
self.settings.ValidateAndAssignDefaults(default_settings)
self.model_part = model[self.settings["model_part_name"].GetString()]
self.variable = KratosMultiphysics.KratosGlobals.GetVariable(self.settings["variable_name"].GetString())
self.ApplyPorositySolutionTransientBodyForceProcess = KratosSDEM.PorositySolutionTransientBodyForceProcess(self.model_part, self.settings)
def ExecuteBeforeSolutionLoop(self):
self.ApplyPorositySolutionTransientBodyForceProcess.ExecuteBeforeSolutionLoop()
def ExecuteInitializeSolutionStep(self):
self.ApplyPorositySolutionTransientBodyForceProcess.ExecuteInitializeSolutionStep()
def ExecuteFinalizeSolutionStep(self):
pass | PypiClean |
/AXX_AIAPI-1.3.0-py3-none-any.whl/axx_aiapp/templates/project_template/project_name/concurrent_log/__init__.py | import time
import logging
import logging.config
import os
from logging.handlers import TimedRotatingFileHandler
import portalocker.constants as porta_lock_const
from portalocker.utils import Lock as PortaLock
class ConcurrentLogFileLock(PortaLock):
def __init__(self, filename, *args, **kwargs):
PortaLock.__init__(self, self.get_lock_filename(filename), *args, **kwargs)
def get_lock_filename(self, log_file_name):
"""
        Define the name of the log file lock, e.g. `.__file.lock`, where `file` matches the log file's baseFilename.
        :return: lock file name
"""
if log_file_name.endswith(".log"):
lock_file = log_file_name[:-4]
else:
lock_file = log_file_name
lock_file += ".lock"
lock_path, lock_name = os.path.split(lock_file)
# hide the file on Unix and generally from file completion
lock_name = ".__" + lock_name
return os.path.join(lock_path, lock_name)
class ConcurrentTimedRotatingFileHandler(TimedRotatingFileHandler):
    # Time of the previous rollover
before_rollover_at = -1
def __init__(self, filename, *args, **kwargs):
        # make sure the log directory exists before the base handler opens the log file
        file_path = os.path.split(filename)[0]
        if file_path and not os.path.exists(file_path):
            os.makedirs(file_path)
        TimedRotatingFileHandler.__init__(self, filename, *args, **kwargs)
self.concurrent_lock = ConcurrentLogFileLock(filename, flags=porta_lock_const.LOCK_EX)
def emit(self, record) -> None:
"""
        This method is based on the Python standard library implementation; the modified parts are marked with comments below.
        The main change is to lock the log file so that log rotation stays correct in a multi-process environment.
"""
        # Added code: acquire the non-reentrant inter-process lock, blocking until it is obtained
with self.concurrent_lock:
try:
if self.shouldRollover(record):
self.doRollover()
"""
                If the record was created before the previous rollover time it must not be written to
                baseFilename; otherwise it is written normally.
                Deciding which log file the record goes to -- modified code starts here.
"""
if record.created <= ConcurrentTimedRotatingFileHandler.before_rollover_at:
currentTime = int(record.created)
                    # v Rollover target file name generation, adapted from the Python 3.7 standard library logging.TimedRotatingFileHandler.doRollover (lines 110:124) v
dstNow = time.localtime(currentTime)[-1]
t = self.computeRollover(currentTime) - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
                    # ^ End of the file name generation code adapted from the standard library TimedRotatingFileHandler ^
                    # If backupCount is set too low, the actual number of log files can exceed the configured value:
                    # under heavy write load a record produced earlier may only be written now, and if no log file
                    # matching its timestamp exists at write time, a new log file matching the record's creation time is created.
                    # "Matching log file" means the rollover file created once the rollover condition is met; its naming rule is the same as the standard library's.
self._do_write_record(dfn, record)
else:
logging.FileHandler.emit(self, record)
"""
                Deciding which log file the record goes to -- modified code ends here.
"""
except Exception:
self.handleError(record)
def doRollover(self):
"""
        This method is based on the Python standard library implementation; the modified parts are marked with comments below.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
"""
        If the rollover file already exists, another process has already performed the rollover.
        Handling records that this process has not yet written after the file was rolled over -- modified code starts here.
"""
        # Modify the class attribute directly: the non-reentrant inter-process lock is already held here, so only one thread can modify it at a time.
        # Because of the Python GIL only one thread per process runs at a time, and after a thread switch the cached value is refreshed, so other threads see the latest value.
        # Record the time of every rollover trigger, whether or not the rollover is actually carried out.
ConcurrentTimedRotatingFileHandler.before_rollover_at = self.rolloverAt
if os.path.exists(dfn):
            # Process state is not synchronized across processes, so another process may already have rolled the file over
            # while this process still marks it as not rolled over.
            # If a record's creation time is earlier than or equal to the next rollover time, it is written to the rolled-over
            # file rather than the current baseFilename; for this process the baseFilename currently on disk is already the
            # post-rollover file to write to, so when the rollover file exists no rollover is performed this time.
pass
else:
self.rotate(self.baseFilename, dfn)
"""
        Handling records that this process has not yet written after the file was rolled over -- modified code ends here.
"""
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
# If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
        # At this point the rollover state held by this process is up to date
self.rolloverAt = newRolloverAt
def _do_write_record(self, dfn, record):
"""
        Write the log record to the given file.
        :param dfn: target log file
        :param record: the log record
"""
with open(dfn, mode="a", encoding=self.encoding) as file:
file.write(self.format(record) + self.terminator)
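# Usage sketch (illustrative, not part of the original module); the file name and
# rotation settings below are placeholders.
#
#   handler = ConcurrentTimedRotatingFileHandler("logs/app.log", when="MIDNIGHT", backupCount=7)
#   handler.setFormatter(logging.Formatter("[%(levelname)s] %(asctime)s %(message)s"))
#   logging.getLogger().addHandler(handler)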
# Register this class in the "logging.handlers" module so it can be referenced when configuring logging via logging.config.fileConfig()
import logging.handlers
logging.handlers.ConcurrentTimedRotatingFileHandler = ConcurrentTimedRotatingFileHandler | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/plugins/media/langs/en_dlg.js | tinyMCE.addI18n('en.media_dlg',{list:"List",file:"File/URL",advanced:"Advanced",general:"General",title:"Insert/Edit Embedded Media","align_top_left":"Top Left","align_center":"Center","align_left":"Left","align_bottom":"Bottom","align_right":"Right","align_top":"Top","qt_stream_warn":"Streamed RTSP resources should be added to the QT Source field under the Advanced tab.\nYou should also add a non-streamed version to the Source field.",qtsrc:"QT Source",progress:"Progress",sound:"Sound",swstretchvalign:"Stretch V-Align",swstretchhalign:"Stretch H-Align",swstretchstyle:"Stretch Style",scriptcallbacks:"Script Callbacks","align_top_right":"Top Right",uimode:"UI Mode",rate:"Rate",playcount:"Play Count",defaultframe:"Default Frame",currentposition:"Current Position",currentmarker:"Current Marker",captioningid:"Captioning ID",baseurl:"Base URL",balance:"Balance",windowlessvideo:"Windowless Video",stretchtofit:"Stretch to Fit",mute:"Mute",invokeurls:"Invoke URLs",fullscreen:"Full Screen",enabled:"Enabled",autostart:"Auto Start",volume:"Volume",target:"Target",qtsrcchokespeed:"Choke Speed",href:"HREF",endtime:"End Time",starttime:"Start Time",enablejavascript:"Enable JavaScript",correction:"No Correction",targetcache:"Target Cache",playeveryframe:"Play Every Frame",kioskmode:"Kiosk Mode",controller:"Controller",menu:"Show Menu",loop:"Loop",play:"Auto Play",hspace:"H-Space",vspace:"V-Space","class_name":"Class",name:"Name",id:"ID",type:"Type",size:"Dimensions",preview:"Preview","constrain_proportions":"Constrain Proportions",controls:"Controls",numloop:"Num Loops",console:"Console",cache:"Cache",autohref:"Auto HREF",liveconnect:"SWLiveConnect",flashvars:"Flash Vars",base:"Base",bgcolor:"Background",wmode:"WMode",salign:"SAlign",align:"Align",scale:"Scale",quality:"Quality",shuffle:"Shuffle",prefetch:"Prefetch",nojava:"No Java",maintainaspect:"Maintain Aspect",imagestatus:"Image Status",center:"Center",autogotourl:"Auto Goto URL","shockwave_options":"Shockwave Options","rmp_options":"Real Media Player Options","wmp_options":"Windows Media Player Options","qt_options":"QuickTime Options","flash_options":"Flash Options",hidden:"Hidden","align_bottom_left":"Bottom Left","align_bottom_right":"Bottom Right","html5_video_options":"HTML5 Video Options",altsource1:"Alternative source 1",altsource2:"Alternative source 2",preload:"Preload",poster:"Poster",source:"Source","html5_audio_options":"Audio Options","preload_none":"Don\'t Preload","preload_metadata":"Preload video metadata","preload_auto":"Let user\'s browser decide", "embedded_audio_options":"Embedded Audio Options", video:"HTML5 Video", audio:"HTML5 Audio", flash:"Flash", quicktime:"QuickTime", shockwave:"Shockwave", windowsmedia:"Windows Media", realmedia:"Real Media", iframe:"Iframe", embeddedaudio:"Embedded Audio" }); | PypiClean |
/OBP_reliability_pillar_1-0.0.12-py3-none-any.whl/OBP_reliability_pillar_1/ec2/instance_in_vpc.py | import botocore
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def instance_in_vpc(self) -> dict:
"""
    Checks whether EC2 instances in every available region belong to a VPC.
    :param self: the calling check object (provides the boto3 session used below)
    :return: dict describing the compliance result
"""
logger.info(" ---Inside ec2 : instance_in_vpc()")
result = True
failReason = ''
offenders = []
compliance_type = "EC2 instance In VPC"
description = "Checks if your EC2 instances belong to a virtual private cloud (VPC)"
resource_type = "EC2 Instance"
risk_level = 'Medium'
regions = self.session.get_available_regions('ec2')
for region in regions:
try:
client = self.session.client('ec2', region_name=region)
marker = ''
while True:
response = client.describe_instances(
MaxResults=1000,
NextToken=marker
)
if len(response['Reservations']) > 0:
for reservation in response['Reservations']:
for instance in reservation['Instances']:
try:
vpc_id = instance['VpcId']
if vpc_id == '' or vpc_id is None:
raise KeyError
except KeyError:
result = False
failReason = "Instances does not belong to any VPC"
offenders.append(instance['InstanceId'])
try:
marker = response['NextToken']
if marker == '':
break
except KeyError:
break
except botocore.exceptions.ClientError as e:
logger.error('Something went wrong with region {}: {}'.format(region, e))
return {
'Result': result,
'failReason': failReason,
'resource_type': resource_type,
'Offenders': offenders,
'Compliance_type': compliance_type,
'Description': description,
'Risk Level': risk_level
} | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/BootstrapValidator/js/language/sq_AL.js | (function($) {
/**
* Albanian language package
* Translated by @desaretiuss
*/
$.fn.bootstrapValidator.i18n = $.extend(true, $.fn.bootstrapValidator.i18n, {
base64: {
'default': 'Ju lutem përdorni sistemin e kodimit Base64'
},
between: {
'default': 'Ju lutem vendosni një vlerë midis %s dhe %s',
notInclusive: 'Ju lutem vendosni një vlerë rreptësisht midis %s dhe %s'
},
callback: {
'default': 'Ju lutem vendosni një vlerë të vlefshme'
},
choice: {
'default': 'Ju lutem vendosni një vlerë të vlefshme',
less: 'Ju lutem përzgjidhni së paku %s mundësi',
more: 'Ju lutem përzgjidhni së shumti %s mundësi ',
between: 'Ju lutem përzgjidhni %s - %s mundësi'
},
color: {
'default': 'Ju lutem vendosni një ngjyrë të vlefshme'
},
creditCard: {
'default': 'Ju lutem vendosni një numër karte krediti të vlefshëm'
},
cusip: {
'default': 'Ju lutem vendosni një numër CUSIP të vlefshëm'
},
cvv: {
'default': 'Ju lutem vendosni një numër CVV të vlefshëm'
},
date: {
'default': 'Ju lutem vendosni një datë të saktë',
min: 'Ju lutem vendosni një datë pas %s',
max: 'Ju lutem vendosni një datë para %s',
range: 'Ju lutem vendosni një datë midis %s - %s'
},
different: {
'default': 'Ju lutem vendosni një vlerë tjetër'
},
digits: {
'default': 'Ju lutem vendosni vetëm numra'
},
ean: {
'default': 'Ju lutem vendosni një numër EAN të vlefshëm'
},
emailAddress: {
'default': 'Ju lutem vendosni një adresë email të vlefshme'
},
file: {
'default': 'Ju lutem përzgjidhni një skedar të vlefshëm'
},
greaterThan: {
'default': 'Ju lutem vendosni një vlerë më të madhe ose të barabartë me %s',
notInclusive: 'Ju lutem vendosni një vlerë më të madhe se %s'
},
grid: {
'default': 'Ju lutem vendosni një numër GRId të vlefshëm'
},
hex: {
'default': 'Ju lutem vendosni një numër të saktë heksadecimal'
},
hexColor: {
'default': 'Ju lutem vendosni një ngjyrë të vlefshme heksadecimale'
},
iban: {
'default': 'Ju lutem vendosni një numër IBAN të vlefshëm',
countryNotSupported: 'Kodi i shtetit %s nuk është i mundësuar',
country: 'Ju lutem vendosni një numër IBAN të vlefshëm në %s',
countries: {
AD: 'Andora',
AE: 'Emiratet e Bashkuara Arabe',
AL: 'Shqipëri',
AO: 'Angola',
AT: 'Austri',
AZ: 'Azerbajxhan',
BA: 'Bosnjë dhe Hercegovinë',
BE: 'Belgjikë',
BF: 'Burkina Faso',
BG: 'Bullgari',
BH: 'Bahrein',
BI: 'Burundi',
BJ: 'Benin',
BR: 'Brazil',
CH: 'Zvicër',
CI: 'Bregu i fildishtë',
CM: 'Kamerun',
CR: 'Kosta Rika',
CV: 'Kepi i Gjelbër',
CY: 'Qipro',
CZ: 'Republika Çeke',
DE: 'Gjermani',
DK: 'Danimarkë',
DO: 'Dominika',
DZ: 'Algjeri',
EE: 'Estoni',
ES: 'Spanjë',
FI: 'Finlandë',
FO: 'Ishujt Faroe',
FR: 'Francë',
GB: 'Mbretëria e Bashkuar',
GE: 'Gjeorgji',
GI: 'Gjibraltar',
GL: 'Groenlandë',
GR: 'Greqi',
GT: 'Guatemalë',
HR: 'Kroaci',
HU: 'Hungari',
IE: 'Irlandë',
IL: 'Izrael',
IR: 'Iran',
IS: 'Islandë',
IT: 'Itali',
JO: 'Jordani',
KW: 'Kuvajt',
KZ: 'Kazakistan',
LB: 'Liban',
LI: 'Lihtenshtejn',
LT: 'Lituani',
LU: 'Luksemburg',
LV: 'Letoni',
MC: 'Monako',
MD: 'Moldavi',
ME: 'Mal i Zi',
MG: 'Madagaskar',
MK: 'Maqedoni',
ML: 'Mali',
MR: 'Mauritani',
MT: 'Maltë',
MU: 'Mauricius',
MZ: 'Mozambik',
NL: 'Hollandë',
NO: 'Norvegji',
PK: 'Pakistan',
PL: 'Poloni',
PS: 'Palestinë',
PT: 'Portugali',
QA: 'Katar',
RO: 'Rumani',
RS: 'Serbi',
SA: 'Arabi Saudite',
SE: 'Suedi',
SI: 'Slloveni',
SK: 'Sllovaki',
SM: 'San Marino',
SN: 'Senegal',
TN: 'Tunizi',
TR: 'Turqi',
VG: 'Ishujt Virxhin Britanikë'
}
},
id: {
'default': 'Ju lutem vendosni një numër identifikimi të vlefshëm ',
countryNotSupported: 'Kodi i shtetit %s nuk është i mundësuar',
country: 'Ju lutem vendosni një numër identifikimi të vlefshëm në %s',
countries: {
BA: 'Bosnjë dhe Hercegovinë',
BG: 'Bullgari',
BR: 'Brazil',
CH: 'Zvicër',
CL: 'Kili',
CN: 'Kinë',
CZ: 'Republika Çeke',
DK: 'Danimarkë',
EE: 'Estoni',
ES: 'Spanjë',
FI: 'Finlandë',
HR: 'Kroaci',
IE: 'Irlandë',
IS: 'Islandë',
LT: 'Lituani',
LV: 'Letoni',
ME: 'Mal i Zi',
MK: 'Maqedoni',
NL: 'Hollandë',
RO: 'Rumani',
RS: 'Serbi',
SE: 'Suedi',
SI: 'Slloveni',
SK: 'Slovaki',
SM: 'San Marino',
TH: 'Tajlandë',
ZA: 'Afrikë e Jugut'
}
},
identical: {
'default': 'Ju lutem vendosni të njëjtën vlerë'
},
imei: {
'default': 'Ju lutem vendosni numër IMEI të njëjtë'
},
imo: {
'default': 'Ju lutem vendosni numër IMO të vlefshëm'
},
integer: {
'default': 'Ju lutem vendosni një numër të vlefshëm'
},
ip: {
'default': 'Ju lutem vendosni një adresë IP të vlefshme',
ipv4: 'Ju lutem vendosni një adresë IPv4 të vlefshme',
ipv6: 'Ju lutem vendosni një adresë IPv6 të vlefshme'
},
isbn: {
'default': 'Ju lutem vendosni një numër ISBN të vlefshëm'
},
isin: {
'default': 'Ju lutem vendosni një numër ISIN të vlefshëm'
},
ismn: {
'default': 'Ju lutem vendosni një numër ISMN të vlefshëm'
},
issn: {
'default': 'Ju lutem vendosni një numër ISSN të vlefshëm'
},
lessThan: {
'default': 'Ju lutem vendosni një vlerë më të madhe ose të barabartë me %s',
notInclusive: 'Ju lutem vendosni një vlerë më të vogël se %s'
},
mac: {
'default': 'Ju lutem vendosni një adresë MAC të vlefshme'
},
meid: {
'default': 'Ju lutem vendosni një numër MEID të vlefshëm'
},
notEmpty: {
'default': 'Ju lutem vendosni një vlerë'
},
numeric: {
'default': 'Ju lutem vendosni një numër me presje notuese të saktë'
},
phone: {
'default': 'Ju lutem vendosni një numër telefoni të vlefshëm',
countryNotSupported: 'Kodi i shtetit %s nuk është i mundësuar',
country: 'Ju lutem vendosni një numër telefoni të vlefshëm në %s',
countries: {
BR: 'Brazil',
CN: 'Kinë',
CZ: 'Republika Çeke',
DE: 'Gjermani',
DK: 'Danimarkë',
ES: 'Spanjë',
FR: 'Francë',
GB: 'Mbretëria e Bashkuar',
MA: 'Marok',
PK: 'Pakistan',
RO: 'Rumani',
RU: 'Rusi',
SK: 'Sllovaki',
TH: 'Tajlandë',
US: 'SHBA',
VE: 'Venezuelë'
}
},
regexp: {
'default': 'Ju lutem vendosni një vlerë që përputhet me modelin'
},
remote: {
'default': 'Ju lutem vendosni një vlerë të vlefshme'
},
rtn: {
'default': 'Ju lutem vendosni një numër RTN të vlefshëm'
},
sedol: {
'default': 'Ju lutem vendosni një numër SEDOL të vlefshëm'
},
siren: {
'default': 'Ju lutem vendosni një numër SIREN të vlefshëm'
},
siret: {
'default': 'Ju lutem vendosni një numër SIRET të vlefshëm'
},
step: {
'default': 'Ju lutem vendosni një hap të vlefshëm të %s'
},
stringCase: {
'default': 'Ju lutem përdorni vetëm shenja të vogla të shtypit',
upper: 'Ju lutem përdorni vetëm shenja të mëdha të shtypit'
},
stringLength: {
'default': 'Ju lutem vendosni një vlerë me gjatësinë e duhur',
less: 'Ju lutem vendosni më pak se %s simbole',
more: 'Ju lutem vendosni më shumë se %s simbole',
between: 'Ju lutem vendosni një vlerë me gjatësi midis %s dhe %s simbole'
},
uri: {
'default': 'Ju lutem vendosni një URI të vlefshme'
},
uuid: {
'default': 'Ju lutem vendosni një numër UUID të vlefshëm',
version: 'Ju lutem vendosni një numër UUID version %s të vlefshëm'
},
vat: {
'default': 'Ju lutem vendosni një numër VAT të vlefshëm',
countryNotSupported: 'Kodi i shtetit %s nuk është i mundësuar',
country: 'Ju lutem vendosni një numër VAT të vlefshëm në %s',
countries: {
AT: 'Austri',
BE: 'Belgjikë',
BG: 'Bullgari',
BR: 'Brazil',
CH: 'Zvicër',
CY: 'Qipro',
CZ: 'Republika Çeke',
DE: 'Gjermani',
DK: 'Danimarkë',
EE: 'Estoni',
ES: 'Spanjë',
FI: 'Finlandë',
FR: 'Francë',
GB: 'Mbretëria e Bashkuar',
GR: 'Greqi',
EL: 'Greqi',
HU: 'Hungari',
HR: 'Kroaci',
IE: 'Irlandë',
IS: 'Iclandë',
IT: 'Itali',
LT: 'Lituani',
LU: 'Luksemburg',
LV: 'Letoni',
MT: 'Maltë',
NL: 'Hollandë',
NO: 'Norvegji',
PL: 'Poloni',
PT: 'Portugali',
RO: 'Rumani',
RU: 'Rusi',
RS: 'Serbi',
SE: 'Suedi',
SI: 'Slloveni',
SK: 'Sllovaki',
VE: 'Venezuelë',
ZA: 'Afrikë e Jugut'
}
},
vin: {
'default': 'Ju lutem vendosni një numër VIN të vlefshëm'
},
zipCode: {
'default': 'Ju lutem vendosni një kod postar të vlefshëm',
countryNotSupported: 'Kodi i shtetit %s nuk është i mundësuar',
country: 'Ju lutem vendosni një kod postar të vlefshëm në %s',
countries: {
AT: 'Austri',
BR: 'Brazil',
CA: 'Kanada',
CH: 'Zvicër',
CZ: 'Republika Çeke',
DE: 'Gjermani',
DK: 'Danimarkë',
FR: 'Francë',
GB: 'Mbretëria e Bashkuar',
IE: 'Irlandë',
IT: 'Itali',
MA: 'Marok',
NL: 'Hollandë',
PT: 'Portugali',
RO: 'Rumani',
RU: 'Rusi',
SE: 'Suedi',
SG: 'Singapor',
SK: 'Sllovaki',
US: 'SHBA'
}
}
});
}(window.jQuery)); | PypiClean |
/Apycula-0.9.0a1.tar.gz/Apycula-0.9.0a1/legacy/report.py | import json
nodes = { 0: "A0", 1: "B0", 2: "C0", 3: "D0", 4: "A1", 5: "B1", 6: "C1", 7: "D1", 8: "A2", 9: "B2", 10: "C2", 11: "D2", 12: "A3", 13: "B3", 14: "C3",
15: "D3", 16: "A4", 17: "B4", 18: "C4", 19: "D4", 20: "A5", 21: "B5", 22: "C5", 23: "D5", 24: "A6", 25: "B6", 26: "C6", 27: "D6", 28: "A7", 29: "B7",
30: "C7", 31: "D7", 32: "F0", 33: "F1", 34: "F2", 35: "F3", 36: "F4", 37: "F5", 38: "F6", 39: "F7", 40: "Q0", 41: "Q1", 42: "Q2", 43: "Q3", 44: "Q4",
45: "Q5", 46: "Q6", 47: "Q7", 48: "OF0", 49: "OF1", 50: "OF2", 51: "OF3", 52: "OF4", 53: "OF5", 54: "OF6", 55: "OF7", 56: "X01", 57: "X02", 58: "X03",
59: "X04", 60: "X05", 61: "X06", 62: "X07", 63: "X08", 64: "N100", 65: "SN10", 66: "SN20", 67: "N130", 68: "S100", 69: "S130", 70: "E100", 71: "EW10",
72: "EW20", 73: "E130", 74: "W100", 75: "W130", 76: "N200", 77: "N210", 78: "N220", 79: "N230", 80: "N240", 81: "N250", 82: "N260", 83: "N270", 84: "S200",
85: "S210", 86: "S220", 87: "S230", 88: "S240", 89: "S250", 90: "S260", 91: "S270", 92: "E200", 93: "E210", 94: "E220", 95: "E230", 96: "E240", 97: "E250",
98: "E260", 99: "E270", 100: "W200", 101: "W210", 102: "W220", 103: "W230", 104: "W240", 105: "W250", 106: "W260", 107: "W270", 108: "N800", 109: "N810",
110: "N820", 111: "N830", 112: "S800", 113: "S810", 114: "S820", 115: "S830", 116: "E800", 117: "E810", 118: "E820", 119: "E830", 120: "W800", 121: "W810",
122: "W820", 123: "W830", 124: "CLK0", 125: "CLK1", 126: "CLK2", 127: "LSR0", 128: "LSR1", 129: "LSR2", 130: "CE0", 131: "CE1", 132: "CE2", 133: "SEL0",
134: "SEL1", 135: "SEL2", 136: "SEL3", 137: "SEL4", 138: "SEL5", 139: "SEL6", 140: "SEL7", 141: "N101", 142: "N131", 143: "S101", 144: "S131", 145: "E101", 146: "E131",
147: "W101", 148: "W131", 149: "N201", 150: "N211", 151: "N221", 152: "N231", 153: "N241", 154: "N251", 155: "N261", 156: "N271", 157: "S201", 158: "S211",
159: "S221", 160: "S231", 161: "S241", 162: "S251", 163: "S261", 164: "S271", 165: "E201", 166: "E211", 167: "E221", 168: "E231", 169: "E241", 170: "E251",
171: "E261", 172: "E271", 173: "W201", 174: "W211", 175: "W221", 176: "W231", 177: "W241", 178: "W251", 179: "W261", 180: "W271", 181: "N202", 182: "N212",
183: "N222", 184: "N232", 185: "N242", 186: "N252", 187: "N262", 188: "N272", 189: "S202", 190: "S212", 191: "S222", 192: "S232", 193: "S242", 194: "S252",
195: "S262", 196: "S272", 197: "E202", 198: "E212", 199: "E222", 200: "E232", 201: "E242", 202: "E252", 203: "E262", 204: "E272", 205: "W202", 206: "W212",
207: "W222", 208: "W232", 209: "W242", 210: "W252", 211: "W262", 212: "W272", 213: "N804", 214: "N814", 215: "N824", 216: "N834", 217: "S804", 218: "S814",
219: "S824", 220: "S834", 221: "E804", 222: "E814", 223: "E824", 224: "E834", 225: "W804", 226: "W814", 227: "W824", 228: "W834", 229: "N808", 230: "N818",
231: "N828", 232: "N838", 233: "S808", 234: "S818", 235: "S828", 236: "S838", 237: "E808", 238: "E818", 239: "E828", 240: "E838", 241: "W808", 242: "W818",
243: "W828", 244: "W838", 245: "E110", 246: "W110", 247: "E120", 248: "W120", 249: "S110", 250: "N110", 251: "S120", 252: "N120", 253: "E111", 254: "W111",
255: "E121", 256: "W121", 257: "S111", 258: "N111", 259: "S121", 260: "N121", 261: "LB01", 262: "LB11", 263: "LB21", 264: "LB31", 265: "LB41", 266: "LB51",
267: "LB61", 268: "LB71", 269: "GB00", 270: "GB10", 271: "GB20", 272: "GB30", 273: "GB40", 274: "GB50", 275: "GB60", 276: "GB70", 277: "VCC", 278: "VSS",
279: "LT00", 280: "LT10", 281: "LT20", 282: "LT30", 283: "LT02", 284: "LT13", 285: "LT01", 286: "LT04", 287: "LBO0", 288: "LBO1", 289: "SS00", 290: "SS40",
291: "GT00", 292: "GT10", 293: "GBO0", 294: "GBO1", 295: "DI0", 296: "DI1", 297: "DI2", 298: "DI3", 299: "DI4", 300: "DI5", 301: "DI6", 302: "DI7",
303: "CIN0", 304: "CIN1", 305: "CIN2", 306: "CIN3", 307: "CIN4", 308: "CIN5", 309: "COUT0", 310: "COUT1", 311: "COUT2", 312: "COUT3", 313: "COUT4", 314: "COUT5"}
with open('dat.json') as f:
d = json.load(f)
for x in ['X0', 'X1', 'X2', 'X8', 'X11', 'Lut', 'Clk', 'Lsr', 'Ce', 'Sel']:
print("Wires", x)
for p, i in zip(d[f'{x}s'], d[f'{x}Ins']):
print(nodes.get(p), [nodes.get(n) for n in i]) | PypiClean |
/CROPS-0.1.1-py3-none-any.whl/crops/io/parsers.py | from crops.about import __prog__, __description__, __author__, __date__, __version__
import gemmi
import os
import csv
from crops.elements.sequence import Sequence
from crops.io.taggers import retrieve_id
from crops.elements.intervals import intinterval
def import_db(inpath,pdb_in=None):
"""Imports intervals database. Input must be a .csv file (filepath).
If imported file is not 'pdb_chain_uniprot.csv' from SIFTS database,
the columns must contain molecule ID, chain ID, lower element of subset,
and higher element of subset, in this order.
:param inpath: Path to interval database used.
:type inpath: str
:param pdb_in: Chain ID(s). If given, the imported values
will be filtered to contain only IDs provided, defaults to None.
:type pdb_in: str, dict, optional
:raises TypeError: When pdb_in is given and is neither a string nor a dictionary.
    :return: A dictionary of :class:`~crops.elements.intervals.intinterval`.
    :rtype: dict [str, :class:`~crops.elements.intervals.intinterval`]
"""
database_out={}
if isinstance(pdb_in,str):
pdb_in_lower={}
pdb_in_lower[pdb_in.lower()]=None
elif isinstance(pdb_in,dict):
pdb_in_lower={}
for element in pdb_in:
if not isinstance(element,str):
raise TypeError('Argument should be either None, a string, or a dictionary with empty values.')
pdb_in_lower[element.lower()]=None
elif pdb_in is None:
pass
else:
raise TypeError('Argument should be either None, a string, or a dictionary with empty values.')
if os.path.basename(inpath)=='pdb_chain_uniprot.csv':
mol=0
chain=1
up=2
leftend=3
rightend=4
else:
mol=0
chain=1
leftend=2
rightend=3
up=None
csv_chain_file = open(inpath)
csv_chain = csv.reader(csv_chain_file)
for entry in csv_chain:
if entry[0][0] != "#" and entry[0] !="PDB":
if pdb_in is None or entry[mol].lower() in pdb_in_lower:
if entry[mol].lower() not in database_out:
database_out[entry[mol].lower()]={}
if entry[chain] not in database_out[entry[mol].lower()]:
database_out[entry[mol].lower()][entry[chain]]=intinterval(description=entry[mol].lower()+'_'+entry[chain])
if up is not None:
database_out[entry[mol].lower()][entry[chain]].tags['uniprot']={}
database_out[entry[mol].lower()][entry[chain]]= \
database_out[entry[mol].lower()][entry[chain]].union(other=[int(entry[leftend]),int(entry[rightend])])
if up is not None:
if entry[up].upper() not in database_out[entry[mol].lower()][entry[chain]].tags['uniprot']:
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]]=intinterval(description=entry[up].upper())
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]]=\
database_out[entry[mol].lower()][entry[chain]].tags['uniprot'][entry[up]].union([int(entry[leftend]),int(entry[rightend])])
return database_out
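# Minimal usage sketch (not part of the original module); the file name and PDB id are
# placeholders.
#
#   subsets = import_db('pdb_chain_uniprot.csv', pdb_in='1xyz')
#   # subsets['1xyz']['A'] is an intinterval built from the interval columns for chain A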
def parsestrfile(str_inpath):
"""Returns dictionary containing :class:`~gemmi.Structure` objects and another one with the file names.
:param str_inpath: Either a directory or file path.
:type str_inpath: str
:raises KeyError: More than one structure file containing same identifier.
:return strdict: A dictionary containing imported :class:`~gemmi.Structure` objects.
:rtype strdict: dict [str, :class:`~gemmi.Structure`]
:return filedict: A dictionary containing file names.
:rtype filedict: dict [str, str]
"""
strdict={}
filedict={}
if os.path.isfile(str_inpath):
structure=gemmi.read_structure(str_inpath)
pdbid=structure.name.lower()
strdict[pdbid]=structure
filedict[pdbid]=os.path.basename(str_inpath)
elif os.path.isdir(str_inpath):
filelist=os.listdir(str_inpath)
        for file in filelist:
            filepath=os.path.join(str_inpath, file)
            if os.path.isfile(filepath):
                try:
                    structure=gemmi.read_structure(filepath)
pdbid=structure.name.lower()
if pdbid in strdict:
raise KeyError('Structure '+pdbid+' loaded more than once. Check files in directory and remove duplicates.')
strdict[pdbid]=structure
                    filedict[pdbid]=file
except:
pass
return strdict, filedict
def parseseqfile(inpath,uniprot=None):
"""Sequence file parser.
:param inpath: Sequence file path.
:type inpath: str
:param uniprot: A dictionary of Uniprot codes, defaults to None.
:type uniprot: str, dict [str, any], optional
:return: A dictionary containing parsed :class:`~crops.elements.sequence.Sequence`.
If uniprot is not None, the dictionary will contain a single entry with a :class:`~crops.elements.sequence.Sequence`
that will contain the requested Uniprot chains as :class:`~crops.elements.sequence.monomer_sequence` objects.
:rtype: dict [str, :class:`~crops.elements.sequence.Sequence`]
"""
newseqs={}
newid=[]
head=''
chain=''
ignore=False
if uniprot is not None:
if not isinstance(uniprot,str) and not isinstance(uniprot,dict):
raise TypeError('Input argument uniprot must be either a string or a dictionary.')
elif isinstance(uniprot,str):
unitemp=uniprot
uniprot={}
uniprot[unitemp]=None
for upcode in uniprot:
if not isinstance(upcode,str):
raise TypeError('Input argument uniprot must be either a string or a dictionary.')
with open(inpath,'r') as f:
indx=-1
while True:
line=f.readline().rstrip()
if (not line or line.startswith(">")) and not ignore:
if uniprot is not None:
if indx>=0:
if len(newseqs)==0:
newseqs['uniprot']=Sequence(seq_id=newid[0].upper(),source=os.path.basename(inpath))
if newid[0].upper() not in newseqs['uniprot'].imer:
newseqs['uniprot'].add_monomer(nheader=head,nseq=chain,nid=newid[0].upper())
if len(newseqs['uniprot'].imer)==len(uniprot):
break
else:
if indx>=0:
if newid[0].lower() not in newseqs:
newseqs[newid[0].lower()]=Sequence(seq_id=newid[0].lower(),source=os.path.basename(inpath))
for iid in newid[1]:
newseqs[newid[0].lower()].add_monomer(head,chain,nid=iid)
if not line:
try:
line=f.readline().rstrip()
if not line:
break
except:
break
if line.startswith(">"):
newid=retrieve_id(line)
head=line
indx += 1
chain = ''
if uniprot is not None:
ignore=False if newid[0] in uniprot else True
elif line.startswith("#") or line.startswith(' #'):
pass
else:
if not ignore:
chain += str(line)
return newseqs | PypiClean |
/Data-Transformer-0.1.1.tar.gz/Data-Transformer-0.1.1/transformer/xformer.py | import csv
import sys
import lxml.etree as ET
from jinja2 import Template
def xslt_transformer(input_file_path, transformer_file_path):
"""
    Transforms an input XML file `input_file_path`
using the XSLT `transformer_file_path`.
"""
# perform transformation
dom = ET.parse(input_file_path)
xslt = ET.parse(transformer_file_path)
transform = ET.XSLT(xslt)
newdom = transform(dom)
return ET.tostring(newdom, pretty_print=True)
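# Usage sketch (not part of the original module); the file names are placeholders.
#
#   transformed = xslt_transformer('records.xml', 'to_html.xsl')
#   sys.stdout.write(transformed)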
def simple_transformer(csv_input_file_path, transformer_file_path, separator="\n", row_pause=False,
process_row=None, output=None):
"""
Transforms a CSV file `csv_input_file_path` using the Transformation file in
`transformer_file_path`.
"""
# read contents of the transformer
transformer_contents = ""
with open(transformer_file_path, 'r') as transformer_file:
transformer_contents = transformer_file.read()
if len(transformer_contents) == 0:
raise ValueError("Transformer is empty.")
    # parse the csv file one row at a time
with open(csv_input_file_path, 'rb') as csvfile:
reader = csv.reader(csvfile)
row_count = 0
fieldnames = []
for row in reader:
output_template = transformer_contents.strip()
row_count += 1
# retrieve field names during first pass
if row_count == 1:
fieldnames = row
continue
# process only requested row (if requested)
if process_row and process_row != row_count:
continue
for i, value in enumerate(row):
output_template = output_template.replace("$%s" % fieldnames[i], value)
if output:
with open(output, 'wb') as file:
file.write(output_template)
print "Output saved to: %s" % output
else:
sys.stdout.write(output_template)
sys.stdout.write("%s\n" % separator)
if row_pause:
raw_input("Press ENTER to continue.")
def jinja_transform(csv_input_file_path, transformer_file_path, separator="\n", row_pause=False,
process_row=None, output=None):
"""
Transforms a CSV file `csv_input_file_path` using the Transformation file in
`transformer_file_path` using Jinja2 Templating Language.
"""
# read contents of the transformer
transformer_contents = ""
with open(transformer_file_path, 'r') as transformer_file:
transformer_contents = transformer_file.read()
if len(transformer_contents) == 0:
raise ValueError("Transformer is empty.")
    # parse the csv file one row at a time
with open(csv_input_file_path, 'rU') as csvfile:
csvfile.seek(0)
reader = csv.reader(csvfile, dialect='excel')
row_count = 0
fieldnames = []
for row in reader:
output_template = transformer_contents.strip()
row_count += 1
# retrieve field names during first pass
if row_count == 1:
fieldnames = row
continue
# process only requested row (if requested)
if process_row and process_row != row_count:
continue
template = Template(output_template)
context = {}
for i, value in enumerate(row):
context[fieldnames[i]] = value.strip()
if output:
with open(output, 'wb') as file:
file.write(template.render(**context))
print "Output saved to: %s" % output
else:
sys.stdout.write(template.render(**context))
sys.stdout.write("%s\n" % separator)
if row_pause:
raw_input("Press ENTER to continue.") | PypiClean |
/Curp-1.3.1.tar.gz/Curp-1.3.1/curp/twobody/amberbase.py | from __future__ import print_function
# standard module
import os, sys
import numpy
import time
# curp module
topdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if topdir not in sys.path:
sys.path.insert(0, topdir)
import clog as logger
################################################################################
class TwoBodyForceBase:
def __init__(self, topology, setting=None):
self.__tpl = topology
self.__setting = setting
self.__natom = self.__tpl.get_natom()
self.__forces = None
self.__ptype_to_energy = {}
self.__ptype_to_forces = {}
self.__ptype_to_displacement = {}
def get_pottypes(self):
return ['bond','angle','torsion','improper',
'coulomb14','vdw14','coulomb','vdw']
def set_module(self, module):
self.__mod = module
def get_module(self):
return self.__mod
def get_natom(self):
return self.__natom
def setup(self, interact_table, check=False):
self.__interact_table = interact_table
max_tbf = self.get_maxpair(interact_table)
self._setup_init(max_tbf, check)
self._setup_bond()
self._setup_angle()
self._setup_torsion()
self._setup_improper()
self._setup_coulomb14()
self._setup_vdw14()
self._setup_coulomb()
self._setup_vdw()
def cal_force(self, crd):
# initialize
self.initialize(crd)
# calculate the bonded components.
for ptype in ('bond','angle','torsion','improper','coulomb14','vdw14'):
self.cal_bonded(ptype)
# calculate the nonbonded components.
for t in self.__interact_table:
self.cal_coulomb(t)
self.cal_vdw(t)
return self.__forces
def _setup_init(self, max_tbf, check=False):
self.__mod.setup(natom=self.get_natom(), check=check,
bonded_pairs = self.__tpl.get_bonded_pairs(), max_tbf=max_tbf)
def _setup_bond(self):
"""Prepare the parameter for the bond calculation."""
mod = self.__setup_bondtype('bond')
mod.ibnd_to_itbf = self.__tpl.get_ibnd_to_ipair()
def _setup_angle(self):
"""Prepare the parameter for the angle calculation."""
mod = self.__setup_bondtype('angle')
mod.iang_to_itbf = self.__tpl.get_iang_to_ipair()
def _setup_torsion(self):
"""Prepare the parameter for the torsion calculation."""
mod = self.__setup_bondtype('torsion')
mod.itor_to_itbf = self.__tpl.get_itor_to_ipair()
def _setup_improper(self):
"""Prepare the parameter for the improper torsion calculation."""
mod = self.__setup_bondtype('improper')
mod.itor_to_itbf = self.__tpl.get_iimp_to_ipair()
def __setup_bondtype(self, btype_name):
"""Prepare the parameter for the calculations without coulomb and vdw.
"""
info = getattr(self.__tpl, 'get_'+btype_name+'_info')()
mod_type = getattr(self.__mod, btype_name)
for key in info.keys():
value = info[key]
setattr(mod_type, key, value)
if btype_name == 'torsion':
logger.info('The number of torsions :', len(mod_type.four_atoms))
if btype_name == 'improper':
logger.info('The number of impropers :', len(mod_type.four_atoms))
return mod_type
def _setup_coulomb(self):
"""Prepare the parameter for the coulomb calculation."""
coulomb = self.__mod.coulomb
info = self.__tpl.get_coulomb_info()
coulomb.charges = info['charges']
coulomb.cutoff_length = self.__setting.curp.coulomb_cutoff_length
def _setup_vdw(self):
"""Prepare the parameter for the vdw calculation."""
vdw = self.__mod.vdw
info = self.__tpl.get_vdw_info()
vdw.atom_types = info['atom_types']
vdw.c6s = info['c6s']
vdw.c12s = info['c12s']
vdw.cutoff_length = self.__setting.curp.vdw_cutoff_length
def _setup_coulomb14(self):
"""Prepare the parameter for the coulomb calculation."""
coulomb14 = self.__mod.coulomb14
info = self.__tpl.get_coulomb_info()
coulomb14.charges = info['charges']
# return self.__tpl.get_i14_to_ipair()
i14_to_itbf = self.__tpl.get_i14_to_ipair()
# if len(self.__tpl.get_i14_to_ipair())==1:
# i14_to_itbf = numpy.array([])
# else:
# i14_to_itbf = self.__tpl.get_i14_to_ipair()
# coulomb14.setup(info['charges'], i14_to_itbf)
coulomb14.i14_to_itbf = self.__tpl.get_i14_to_ipair()
def _setup_vdw14(self):
"""Prepare the parameter for the vdw calculation."""
vdw14 = self.__mod.vdw14
info = self.__tpl.get_vdw_info()
vdw14.atom_types = info['atom_types']
vdw14.c6s = info['c6s']
vdw14.c12s = info['c12s']
# if len(self.__tpl.get_i14_to_ipair())==1:
# i14_to_itbf = numpy.array([])
# else:
i14_to_itbf = self.__tpl.get_i14_to_ipair()
# vdw14.setup(info['atom_types'],info['c6s'],info['c12s'], i14_to_itbf)
# print(self.__tpl.get_i14_to_ipair())
vdw14.i14_to_itbf = self.__tpl.get_i14_to_ipair()
# print(vdw14.i14_to_itbf )
logger.info( 'The number of 1-4 interactions', len(i14_to_itbf))
def initialize(self, crd):
self.__mod.initialize(crd)
self.__forces = numpy.zeros( [self.__natom, 3] )
self.__ptype_to_energy = {}
self.__ptype_to_forces = {}
def cal_bonded(self, bond_type):
"""Calculate the pairwise forces using the bonded type modules.
Types are bond, angle, torsion, improper, coulomb14 and vdw14.
"""
mod = getattr(self.__mod, bond_type)
mod.calculate()
self.__forces += mod.forces
# store energy and forces
self.__ptype_to_energy[bond_type] = mod.energy
self.__ptype_to_forces[bond_type] = mod.forces
self.__ptype_to_displacement[bond_type] = mod.displacement
#DEBUG
# print('** {} forces **'.format(bond_type))
# for iatm_1, f in enumerate(mod.forces):
# print("{:5>} {:15.7f}{:15.7f}{:15.7f}".format(
# iatm_1+1, f[0],f[1],f[2]))
# print()
# if bond_type == 'bond':
# print('** {} pairwise forces **'.format(bond_type))
# for ibnd_1, itbf in enumerate(mod.ibnd_to_itbf):
# tbf = mod.tbforces[itbf-1]
# iatm, jatm = mod.two_atoms[ibnd_1]
# print("{:5>} {:5>} {:5>} {:15.7f}{:15.7f}{:15.7f}".format(
# ibnd_1+1, iatm,jatm, tbf[0],tbf[1],tbf[2]))
# print()
#DEBUG
return dict(energy = mod.energy,
forces = mod.forces,
tbforces = mod.tbforces,
displacement = mod.displacement)
def cal_coulomb(self, table):
return self._cal_nonbond(table, 'coulomb')
def cal_vdw(self, table):
return self._cal_nonbond(table, 'vdw')
def _cal_nonbond(self, table, pottype):
"""Calculate the pairwise forces using the bonded type modules.
Types are coulomb and vdw.
"""
mod = getattr(self.__mod, pottype)
mod.calculate(table)
energy = mod.energy.copy()
forces = mod.forces.copy()
displacement = mod.displacement
# store energy, forces and distance
if pottype not in self.__ptype_to_energy:
self.__ptype_to_energy[pottype] = energy
else:
self.__ptype_to_energy[pottype] += energy
self.__forces += forces
if pottype not in self.__ptype_to_forces:
self.__ptype_to_forces[pottype] = forces
else:
self.__ptype_to_forces[pottype] += forces
self.__ptype_to_displacement[pottype] = displacement
# get pairwise forces
tbforces = mod.tbforces.copy()
return dict(energy = mod.energy,
forces = mod.forces,
tbforces = tbforces,
displacement = displacement)
def get_forces(self, pottype):
"""Return the calculated force with givin potential type."""
return self.__ptype_to_forces[pottype]
def get_energy(self, pottype):
"""Return the calculated energy."""
return self.__ptype_to_energy[pottype]
def output_energy(self):
"""Output the energy."""
logger.info_cycle(' ** Output energy **')
for pottype in self.get_pottypes():
energy = self.get_energy(pottype)
logger.info_cycle(' {:>10} : {:>}'.format(pottype, energy))
logger.info_cycle()
def output_force(self):
logger.debug_cycle(' ** Output force **')
for pottype in self.get_pottypes():
logger.debug_cycle(' [ {} force ] '.format(pottype))
force = self.get_forces(pottype)
for iatm_1, f in enumerate(force):
logger.debug_cycle(
' {:>8} : {:>12.8f} {:>12.8f} {:>12.8f}'.format(
iatm_1+1, f[0], f[1], f[2]))
        logger.debug_cycle(' [ Total force ] ')
for iatm_1, f in enumerate(self.__forces):
logger.debug_cycle(
' {:>8} : {:>12.8f} {:>12.8f} {:>12.8f}'.format(
iatm_1+1, f[0], f[1], f[2]))
logger.debug_cycle()
def output_bonded(self, results, pot_type):
print("[{}]".format(pot_type))
print("== Energy = ")
print(results['energy'])
print()
print("== Forces ==")
print(results['forces'])
print()
print('== Two-body forces ==')
caltype_mod = getattr(self.get_module(), pot_type)
caltype_mod.print_tbforce()
print()
# def write_bond(self):
# fmt = "{iatm} {jatm} {x} {y} {z}"
# tbfs = self.__mod.bond.tbforces
# two_atoms = self.__tpl.get_bond_info()['two_atoms']
# for two, tbf in zip(two_atoms, tbfs):
# iatm, jatm = two
# print(fmt.format(
# iatm=iatm, jatm=jatm, x=tbf[0], y=tbf[1],z=tbf[2]))
# def write_energy(self):
# pass
def output_nonbonded(self, results, pot_type, table):
print("[{}]".format(pot_type))
print("== Energy == ")
print(results['energy'])
print()
print("== Forces ==")
print(results['forces'])
print()
print('== Two-body forces ==')
tbfs = results['tbforces']
itbf = 0
for iatm, jatm_beg, jatm_end in table:
for jatm in range(jatm_beg, jatm_end+1):
itbf += 1
print(iatm, jatm, tbfs[itbf-1])
print()
def get_maxpair(self, interact_table):
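        """Return the largest number of atom pairs contained in any single
        interaction table; this value is passed to the backend module as
        max_tbf during setup."""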
maxpair = 0
for t in interact_table:
npair = 0
for iatm, jatm_beg, jatm_end in t:
npair += jatm_end - jatm_beg + 1
maxpair = max(maxpair, npair)
return maxpair
class TwoBodyForce(TwoBodyForceBase):
def __init__(self, topology, setting=None):
TwoBodyForceBase.__init__(self, topology, setting)
import lib_amberbase
self.set_module(lib_amberbase)
if __name__ == '__main__':
import numpy
class Setting:
class Curp:
coulomb_cutoff_length = 10.0
vdw_cutoff_length = 10.0
curp = Curp()
import os, sys
topdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if topdir not in sys.path:
sys.path.insert(0, topdir)
from forcefield.amberbase import ConverterBase
class DummyTopology(ConverterBase):
def __init__(self):
ConverterBase.__init__(self, None)
def get_mol_info(self): pass
def convert(self): pass
def get_residue_info(self): pass
def get_pbc_info(self): pass
def get_natom(self):
return 5
def get_bond_info(self):
return dict(
two_atoms = [[1,2], [1,3], [4,5]],
force_consts = [1.0, 1.5, 1.9],
length_eqs = [1.0, 1.2, 0.9] )
def get_angle_info(self):
return dict(
three_atoms = [[1,2,3], [2,3,4], [1,2,5]],
force_consts = [1.5, 2.0, 3.0],
theta_eqs = [90.0, 105.0, 120.0] )
def get_torsion_info(self):
return dict(
four_atoms = [[1,2,3,4], [2,1,4,5]],
num_torsions = [1, 1],
num_freqs = [2, 5],
force_consts = [1.5, 1.0],
initial_phases = [180.0, 0.0] )
def get_improper_info(self):
return dict(
four_atoms = [[1,2,3,4], [2,1,4,5]],
num_torsions = [1, 1],
num_freqs = [2, 5],
force_consts = [1.5, 1.0],
initial_phases = [180.0, 0.0] )
def get_coulomb_info(self):
return dict(
charges = [0.5, 0.1, 0.3, 0.01, -0.6] )
def get_vdw_info(self):
return dict(
atom_types = [1, 2, 1, 3, 1],
c6s = [[100.0, 50.0, 35.5],
[50.0, 200.0, 10.0],
[35.0, 10.0, 300.0]],
c12s = [[300.0, 91.0, 25.0],
[91.0, 200.0, 15.0],
[25.0, 15.0, 100.0]] )
def get_bonded14_pairs(self):
return [[1,4],[2,5]]
# setup
tbcal = TwoBodyForce(DummyTopology(), Setting())
tbcal.setup(max_tbf=10)
# # initialize with coordinate
crd = numpy.array(
[[1.0, 2.0, 3.0],
[2.0, 1.0, 1.5],
[3.0, 3.0, 3.0],
[0.0, 0.0, 0.0],
[5.5, 5.1, 4.5]])
tbcal.initialize(crd)
# # calculate two-body forces and output its data
l = ['bond', 'angle', 'torsion', 'improper', 'coulomb14', 'vdw14']
# l = ['coulomb14', 'vdw14']
for caltype in l:
res = tbcal.cal_bonded(caltype)
tbcal.output_bonded(res, caltype)
table = [[1, 2, 5], [2, 3, 5], [3,4,5], [4, 5, 5] ]
for caltype in ['coulomb', 'vdw']:
res = getattr(tbcal, 'cal_'+caltype)(table)
tbcal.output_nonbonded(res, caltype, table)
# crd = numpy.array(
# [[2.0, 3.0, 4.1],
# [3.0, 2.0, 2.5],
# [4.0, 4.0, 4.0],
# [1.0, 1.0, 1.0],
# [9.0, 9.1, 9.2]])
# tbcal.initialize(crd, check=True)
# tbcal.setup()
# results = tbcal.cal_bonded()
# output(results)
# from benchmarker import Benchmarker
# with Benchmarker(width=20) as bm:
# with bm('setup'):
# crd = numpy.array(
# [[2.0, 3.0, 4.1],
# [3.0, 2.0, 2.5],
# [4.0, 4.0, 4.0],
# [1.0, 1.0, 1.0],
# [9.0, 9.1, 9.2]])
# tbcal.initialize(crd, check=False)
# tbcal.setup()
# with bm('bonded'):
# results = tbcal.cal_bonded()
# output(results)
# table = [[1, 2, 5], [2, 3, 3], [2, 5, 5]]
# with bm('coulomb'):
# results = tbcal.cal_coulomb(table)
# output(results)
# with bm('vdw'):
# results = tbcal.cal_vdw(table)
# output(results) | PypiClean |
/HTSQL-2.3.3.tar.gz/HTSQL-2.3.3/src/htsql/core/cmd/act.py |
from ..adapter import Adapter, adapt
from ..error import Error, act_guard
from ..util import Clonable
from .command import Command, UniversalCmd, DefaultCmd, FormatCmd, FetchCmd
from .summon import recognize
from .embed import embed
from ..syn.parse import parse
from ..syn.syntax import Syntax
from ..fmt.emit import emit, emit_headers
from ..fmt.accept import accept
class UnsupportedActionError(Error):
pass
class Action(Clonable):
pass
class ProduceAction(Action):
def __init__(self, environment=None):
self.environment = environment
class SafeProduceAction(ProduceAction):
def __init__(self, environment=None, cut=None):
self.environment = environment
self.cut = cut
class AnalyzeAction(Action):
def __init__(self, environment=None):
self.environment = environment
class RenderAction(Action):
def __init__(self, environ):
self.environ = environ
class Act(Adapter):
adapt(Command, Action)
def __init__(self, command, action):
assert isinstance(command, Command)
assert isinstance(action, Action)
self.command = command
self.action = action
def __call__(self):
raise UnsupportedActionError("unsupported action")
class ActUniversal(Act):
adapt(UniversalCmd, Action)
def __call__(self):
return act(self.command.query, self.action)
class ActDefault(Act):
adapt(DefaultCmd, Action)
def __call__(self):
command = FetchCmd(self.command.syntax)
return act(command, self.action)
class RenderFormat(Act):
adapt(FormatCmd, RenderAction)
def __call__(self):
format = self.command.format
product = produce(self.command.feed)
status = "200 OK"
headers = emit_headers(format, product)
body = emit(format, product)
return (status, headers, body)
class RenderProducer(Act):
adapt(Command, RenderAction)
@classmethod
def __follows__(component, other):
return True
def __call__(self):
format = accept(self.action.environ)
product = produce(self.command)
status = "200 OK"
headers = emit_headers(format, product)
body = emit(format, product)
return (status, headers, body)
def act(command, action):
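    # Normalize the input first (a raw string or syntax tree is resolved into a
    # Command via recognize()), then dispatch it to the matching Act adapter
    # inside an error guard.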
assert isinstance(command, (Command, Syntax, unicode, str))
assert isinstance(action, Action)
if not isinstance(command, Command):
command = recognize(command)
with act_guard(command):
return Act.__invoke__(command, action)
def produce(command, environment=None, **parameters):
environment = embed(environment, **parameters)
action = ProduceAction(environment)
return act(command, action)
def safe_produce(command, cut, environment=None, **parameters):
environment = embed(environment, **parameters)
action = SafeProduceAction(environment, cut)
return act(command, action)
def analyze(command, environment=None, **parameters):
environment = embed(environment, **parameters)
    action = AnalyzeAction(environment)
return act(command, action)
def render(command, environ):
action = RenderAction(environ)
return act(command, action) | PypiClean |
/Durus-4.2.tar.gz/Durus-4.2/durus/storage.py | import heapq
from durus.serialize import unpack_record, split_oids, extract_class_name
from durus.utils import int8_to_str
import durus.connection
class Storage (object):
"""
This is the interface that Connection requires for Storage.
"""
def __init__(self):
raise RuntimeError("Storage is abstract")
def load(self, oid):
"""Return the record for this oid.
Raises a KeyError if there is no such record.
May also raise a ReadConflictError.
"""
raise NotImplementedError
def begin(self):
"""
Begin a commit.
"""
raise NotImplementedError
def store(self, oid, record):
"""Include this record in the commit underway."""
raise NotImplementedError
def end(self, handle_invalidations=None):
"""Conclude a commit.
This may raise a ConflictError.
"""
raise NotImplementedError
def sync(self):
"""() -> [oid:str]
Return a list of oids that should be invalidated.
"""
raise NotImplementedError
def new_oid(self):
"""() -> oid:str
Return an unused oid. Used by Connection for serializing new persistent
instances.
"""
raise NotImplementedError
def close(self):
"""Clean up as needed.
"""
def get_packer(self):
"""
Return an incremental packer (a generator), or None if this storage
does not support incremental packing.
Used by StorageServer.
"""
return None
def pack(self):
"""If this storage supports it, remove obsolete records."""
return None
def bulk_load(self, oids):
"""(oids:sequence(oid:str)) -> sequence(record:str)
"""
for oid in oids:
yield self.load(oid)
def gen_oid_record(self, start_oid=None, batch_size=100):
"""(start_oid:str = None, batch_size:int = 100) ->
sequence((oid:str, record:str))
Returns a generator for the sequence of (oid, record) pairs.
If a start_oid is given, the resulting sequence follows a
breadth-first traversal of the object graph, starting at the given
start_oid. This uses the storage's bulk_load() method because that
is faster in some cases. The batch_size argument sets the number
of object records loaded on each call to bulk_load().
If no start_oid is given, the sequence may include oids and records
that are not reachable from the root.
"""
if start_oid is None:
start_oid = durus.connection.ROOT_OID
todo = [start_oid]
seen = set()
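        # `todo` is kept as a min-heap of oids still to load (via heapq), and
        # `seen` guards against loading or yielding the same record twice.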
while todo:
batch = []
while todo and len(batch) < batch_size:
oid = heapq.heappop(todo)
if oid not in seen:
batch.append(oid)
seen.add(oid)
for record in self.bulk_load(batch):
oid, data, refdata = unpack_record(record)
yield oid, record
for ref in split_oids(refdata):
if ref not in seen:
heapq.heappush(todo, ref)
def gen_referring_oid_record(storage, referred_oid):
"""(storage:Storage, referred_oid:str) -> sequence([oid:str, record:str])
Generate oid, record pairs for all objects that include a
reference to the `referred_oid`.
"""
for oid, record in storage.gen_oid_record():
if referred_oid in split_oids(unpack_record(record)[2]):
yield oid, record
def gen_oid_class(storage, *classes):
"""(storage:Storage, classes:(str)) ->
sequence([(oid:str, class_name:str)])
Generate a sequence of oid, class_name pairs.
If classes are provided, only output pairs for which the
class_name is in `classes`.
"""
for oid, record in storage.gen_oid_record():
class_name = extract_class_name(record)
if not classes or class_name in classes:
yield oid, class_name
def get_census(storage):
"""(storage:Storage) -> {class_name:str, instance_count:int}"""
result = {}
for oid, class_name in gen_oid_class(storage):
result[class_name] = result.get(class_name, 0) + 1
return result
def get_reference_index(storage):
"""(storage:Storage) -> {oid:str : [referring_oid:str]}
Return a full index giving the referring oids for each oid.
This might be large.
"""
result = {}
for oid, record in storage.gen_oid_record():
for ref in split_oids(unpack_record(record)[2]):
result.setdefault(ref, []).append(oid)
return result
class MemoryStorage (Storage):
"""
A concrete Storage that keeps everything in memory.
This may be useful for testing purposes.
"""
def __init__(self):
self.records = {}
self.transaction = None
self.oid = -1
def new_oid(self):
self.oid += 1
return int8_to_str(self.oid)
def load(self, oid):
return self.records[oid]
def begin(self):
self.transaction = {}
def store(self, oid, record):
self.transaction[oid] = record
def end(self, handle_invalidations=None):
self.records.update(self.transaction)
self.transaction = None
def sync(self):
return [] | PypiClean |
/Elevator-0.5c.tar.gz/Elevator-0.5c/CHANGES.rst |
0.5c / 2013-03-18
==================
* Fix : DatabaseStore last acess property
* Fix: init script creates pid file
* Fix: pyzmq version
* Fix: explicit daemon init failures
0.5 / 2013-02-01
==================
* Remove: legacy setup_loggers function
* fix #123: exposing a database object
* ref #123: Renamed DatabasesHandler to DatabaseStore
* Fix: elevator benchmarks
* Fix: supervisor test should remove their tests files
* Add: tests for backend atm
* Add: backend supervisor tests + fixes
* update: enhance backend majordome management
* fix #125: backend does not instantiate it's own DatabasesHandler anymore
* Fix: elevator tests fakers now uses a clear files/dirs pattern
* Update: more obvious DatabaseHandler args names
* Add: benchmarks using hurdles and pyelevator
* update #120 : Auto re-mount unmounted database on new requests
* Fix : backend properly tears down workers
* fix #120, fix #91: Implement Majordom watcher thread
* Update #120: set databases last access marker
* Update 120: move ocd worker to backend module
* Update #121: implement last activity action on workers
* Update #121: Documented worker
* Update #121: Workers poll to reduce cpu usage + backend refactoring
* Update #121: use an internal message protocol between supervisor and workers
* Update: Moved the backend elements in their own module
* Fix #122: workers now set their processing state
* Refactor: moved loggers init in their own log module
* Update #121: fixed workers stop action
* Update #121: Added constants to normalize interaction with workers
* Add #121: basic workers supervisor implementation, implies a lot of refactoring
* Update: rename server poller
* Update: use ROUTER/DEALER terminology and rename workerpool and proxy to backend and frontend
* Update: renamed conf module to args
0.4b / 2013-01-28
==================
* Fix: Refactor api tests
* Fix #119: Range and Slice now support include_key, and include_value params
* Remove: max cache management + Add: Lru cache and bloom filters
0.4a / 2013-01-22
==================
* Add : Implement PING command
* Add : Cli module
* Add : Debian packaging files
* Update: Use plyvel leveldb backend
* Update: Use plyvel bloom filter in read operations
* Update: Add experimental command line doc
* Update: Set fabfile as a module
* Update: Documentation to fit with plyvel
* Update #114: Run MGet against db snapshot
* Update : working cmdline
* Fix #114: Enhance MGET perfs by acting on a min/max keys range slice
* Fix #113: handle MGET arguments in command line
* Many other little updates and fixes, see logs
0.4 / 2012-10-22
==================
* Add: restore theme
* Add : Base sphinx documentation
* Update : new License MIT
* Fix #86: IOError when bad config file supplied as cmdline argument
* Fix #95: Elevator will start and log errors even though some databases are corrupted
* Fix : log-level debug messages format
* Fix : travis, tests, requirements
0.3d / 2012-10-19
==================
* Add : Request error for invalid request messages
* Update #91: Mount default at server startup
* Update #91: Mount/Unmount command + auto-mount on connect
* Update #91: add a ticker class, which executes a function every x seconds
* Update #30, Update #99: Compress Responses on demande (request.meta['compression'])
* Update #88, Update #99: now responses comes in two parts header+content
* Update #88: Fix MGet, Range, Slice return values types to suite with new responses format
* Update #88: Refactored Request/Responses format
* Update : Refactored DatabasesHandler internal storage
* Update : Few refactoring on loggers handling
* Update : Refactored DBConnect no more db_uid to provide in request
* Fix #97: provide mono-letters args
* Fix #89: Log requests/responses on log-level=DEBUG
* Fix #87: Refactored logging
* Fix #100: Non blocking workers, graceful shutdown == PERFORMANCES
* Fix #98: Activity logging on both file and stdout
* Fix #101: fixed ipc handling
* Fix : api tests for compatibility with new Req/Resp
* Fix : refactored tests for new Range/Slice behavior when size == 1
* Fix : Mount/Unmount passed args
| PypiClean |
/Jetforce-0.9.1-py3-none-any.whl/jetforce/server.py | from __future__ import annotations
import socket
import sys
import typing
from twisted.internet import reactor as _reactor
from twisted.internet.base import ReactorBase
from twisted.internet.endpoints import SSL4ServerEndpoint
from twisted.internet.protocol import Factory
from twisted.internet.tcp import Port
from .__version__ import __version__
from .app.base import ApplicationCallable
from .protocol import GeminiProtocol
from .tls import GeminiCertificateOptions, generate_ad_hoc_certificate
if sys.stderr.isatty():
CYAN = "\033[36m\033[1m"
RESET = "\033[0m"
else:
CYAN = ""
RESET = ""
ABOUT = fr"""
{CYAN}You are now riding on...
_________ _____________
______ /______ /___ __/_______________________
___ _ /_ _ \ __/_ /_ _ __ \_ ___/ ___/ _ \
/ /_/ / / __/ /_ _ __/ / /_/ / / / /__ / __/
\____/ \___/\__/ /_/ \____//_/ \___/ \___/{RESET}
An Experimental Gemini Server, v{__version__}
https://github.com/michael-lazar/jetforce
"""
class GeminiServer(Factory):
"""
Wrapper around twisted's TCP server that handles most of the setup and
plumbing for you.
"""
protocol_class = GeminiProtocol
# The TLS twisted interface class is confusingly named SSL4, even though it
# will accept either IPv4 & IPv6 interfaces.
endpoint_class = SSL4ServerEndpoint
def __init__(
self,
app: ApplicationCallable,
reactor: ReactorBase = _reactor,
host: str = "127.0.0.1",
port: int = 1965,
hostname: str = "localhost",
certfile: typing.Optional[str] = None,
keyfile: typing.Optional[str] = None,
cafile: typing.Optional[str] = None,
capath: typing.Optional[str] = None,
):
if certfile is None:
self.log_message("Generating ad-hoc certificate files...")
certfile, keyfile = generate_ad_hoc_certificate(hostname)
self.app = app
self.reactor = reactor
self.host = host
self.port = port
self.hostname = hostname
self.certfile = certfile
self.keyfile = keyfile
self.cafile = cafile
self.capath = capath
def log_access(self, message: str) -> None:
"""
Log standard "access log"-type information.
"""
print(message, file=sys.stdout)
def log_message(self, message: str) -> None:
"""
Log special messages like startup info or a traceback error.
"""
print(message, file=sys.stderr)
def on_bind_interface(self, port: Port) -> None:
"""
Log when the server binds to an interface.
"""
sock_ip, sock_port, *_ = port.socket.getsockname()
if port.addressFamily == socket.AF_INET:
self.log_message(f"Listening on {sock_ip}:{sock_port}")
else:
self.log_message(f"Listening on [{sock_ip}]:{sock_port}")
def buildProtocol(self, addr: typing.Any) -> GeminiProtocol:
"""
This method is invoked by twisted once for every incoming connection.
It builds the instance of the protocol class, which is what actually
implements the Gemini protocol.
"""
return GeminiProtocol(self, self.app)
def initialize(self) -> None:
"""
Install the server into the twisted reactor.
"""
certificate_options = GeminiCertificateOptions(
certfile=self.certfile,
keyfile=self.keyfile,
cafile=self.cafile,
capath=self.capath,
)
interfaces = [self.host] if self.host else ["0.0.0.0", "::"]
for interface in interfaces:
endpoint = self.endpoint_class(
reactor=self.reactor,
port=self.port,
sslContextFactory=certificate_options,
interface=interface,
)
endpoint.listen(self).addCallback(self.on_bind_interface)
def run(self) -> None:
"""
This is the main server loop.
"""
self.log_message(ABOUT)
self.log_message(f"Server hostname is {self.hostname}")
self.log_message(f"TLS Certificate File: {self.certfile}")
self.log_message(f"TLS Private Key File: {self.keyfile}")
self.initialize()
self.reactor.run() | PypiClean |
/Abyss-Shell-1.0.0.tar.gz/Abyss-Shell-1.0.0/Abyss/__init__.py | from colorama import Fore, Back, init
from os import error, system, truncate
from tqdm import tqdm
import pyfiglet
import requests
import json
import os
# #https://download1518.mediafire.com/pjbmrxu3fyig/ygh79hofhj9npol/config.json
dir_path = os.path.dirname(os.path.realpath(__file__))
def download(url, filename, callback):
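    """Stream `url` into `filename`, showing a tqdm progress bar, and report
    the outcome to `callback` as 'successful' or 'error'."""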
print(Fore.GREEN)
response = requests.get(url, stream=True)
total_size_in_bytes= int(response.headers.get('content-length', 0))
block_size = 1024 #1 Kibibyte
progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
with open(filename, 'wb') as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        print("ERROR, something went wrong")
        callback('error')
        return
    callback('successful')
def ondownload(state):
if state == "successful":
        print(f'{Fore.GREEN}[ABYSS-INFO]: config.json file downloaded.')
        print(f'{Fore.GREEN}[ABYSS-INFO]: Please run the file again.')
    else:
        print(f'{Fore.GREEN}[ABYSS-ERROR]: Error downloading config.json file, check your internet connection or try again.')
exit()
class Abyss:
def __init__(self):
self.foreground = None
self.background = None
self.error_color = None
self.Info_color = None
self.title_font = None
self.title = None
self.subtitle = None
def init(themename,path,filename):
try:
f = open(f'{path}/{filename}', 'r')
c = f.read()
js = json.loads(c)
foreground = js[themename]['style']['foreground'].upper()
background = js[themename]['style']['background'].upper()
error_color = js[themename]['style']['error_color'].upper()
info_color = js[themename]['style']['info_color'].upper()
title_font = js[themename]['style']['title_font'].upper()
title = js[themename]['shell_info']['title'].upper()
subtitle = js[themename]['shell_info']['subtitle'].upper()
foreground = getattr(Fore, foreground)
background = getattr(Fore, background)
error_color = getattr(Fore, error_color)
info_color = getattr(Fore, info_color)
build(Abyss,foreground,error_color,info_color,background,title_font,title,subtitle)
except FileNotFoundError:
print(f'{Fore.RED}[ABYSS-ERROR]:File config.json not found.')
print(f'{Fore.GREEN}[ABYSS-INFO]: Downloading config.json file...')
download('https://drive.google.com/u/0/uc?id=15hGFtFkaupcuzcpJYdU5vmLmZwK5pywz&export=download',
f'{path}/{filename}', ondownload)
def generate_menu(options):
option = None
        options[f'{len(options)+1}']=('exit', exitmenu)
while option != len(options):
show_menu(options)
option = read_option(options)
run(option, options)
def show_menu(options):
        print('Select an option:')
for i in sorted(options):
print(f' {i}) {options[i][0]}')
def read_option(options):
        while (a := input('Option: ')) not in options:
            print('Invalid option, please try again.')
return a
def run(option, options):
options[option][1]()
def exitmenu():
print('Exiting')
exit()
def build(self,foreground=Fore.BLACK, error_color=Fore.BLUE, info_color=Fore.CYAN, background='f0', title_font="wavy",title="ABYSS",subtitle="Shell Library, KAT"):
self.foreground = foreground
self.background = background
self.error_color = error_color
self.Info_color = info_color
self.title_font = title_font
self.title = title
self.subtitle = subtitle
cleanScreen()
title = pyfiglet.figlet_format(title, font = title_font)
print(foreground)
print(title)
print(subtitle)
def cleanScreen():
_ = system('cls') | PypiClean |
/Downpour-0.2.tar.gz/Downpour-0.2/downpour/web/templates/media/js/common.js | function AJAX() {
this.http = AJAX.CreateNewHTTPObject();
}
AJAX.prototype.get = function(url, callback) {
req = this.http;
req.open('GET', url, true);
req.setRequestHeader('Content-Type', 'text/plain');
req.onreadystatechange = function(transport) {
if ((req.readyState == 4) && (req.status == 200))
callback(req.responseText);
}
req.send(null);
}
AJAX.prototype.getJSON = function(url, callback) {
this.get(url, function(response) {
callback(eval('(' + response + ')'));
});
}
AJAX.CreateNewHTTPObject = function() {
var xmlhttp;
/*@cc_on
@if (@_jscript_version >= 5)
try {
xmlhttp = new ActiveXObject("Msxml2.XMLHTTP");
}
catch (e) {
try {
xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
}
catch (E) {
xmlhttp = false;
}
}
@else
xmlhttp = false;
@end @*/
if (!xmlhttp && typeof XMLHttpRequest != 'undefined') {
try {
xmlhttp = new XMLHttpRequest();
}
catch (e) {
xmlhttp = false;
}
}
return xmlhttp;
}
function findElementsByClassName(classname, root) {
var res = [];
var elts = (root||document).getElementsByTagName('*')
var re = new RegExp('\\b'+classname+'\\b');
for (var i = 0; i < elts.length; ++i)
if (elts[i].className.match(re))
res[res.length] = elts[i];
return res;
}
function addClass(el, c) {
cl = el.className ? el.className.split(/ /) : [];
for (var i = 0; i < cl.length; ++i)
if (cl[i] == c)
return;
cl[cl.length] = c;
el.className = cl.join(' ');
}
function removeClass(el, c) {
if (!el.className) return;
cl = el.className.split(/ /);
var nc = [];
for (var i = 0; i < cl.length; ++i)
if (cl[i] != c)
nc[nc.length] = cl[i];
el.className = nc.join(' ');
}
function stopEvent(e) {
var ev = window.event||e;
if (ev.stopPropagation)
ev.stopPropagation();
else
ev.cancelBubble = true;
}
function addEvent(el, name, handler) {
if (el.addEventListener) {
el.addEventListener(name, handler, false);
} else {
el.attachEvent('on'+name, handler);
}
}
function removeEvent(el, name, handler) {
if (el.removeEventListener) {
el.removeEventListener(name, handler, false);
} else {
el.detachEvent('on'+name, handler);
}
} | PypiClean |
/Affirmations-0.0.10.tar.gz/Affirmations-0.0.10/README.md | # Affirmations
A module that gives you a little bump of encouragement.
## Requirements
- A positive attitude
- A can-do spirit
## Installation
```bash
pip install Affirmations
```
## Usage
Decorate any function to get a random affirmation printed to stdout every time that function is run
```python
from Affirmations import affirm
@affirm() # prints an affirmation to stdout 100% of the time this function is run
def hello_world():
print("hello")
@affirm(0.2) # prints an affirmation to stdout 20% of the time this function is run
def hello_world2():
print("hello")
hello_world()
```
```bash
hello
You are awesome!
``` | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py | import numpy as np
from numpy import dtype as np_dtype
import pandas as pd
from pandas.api import types as pd_types
from sqlalchemy.types import (
Integer, Float, Text
)
from mindsdb_sql.parser.ast import Insert, Identifier, CreateTable, TableColumn, DropTables
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
from mindsdb.api.mysql.mysql_proxy.datahub.classes.tables_row import TablesRow
import mindsdb.utilities.profiler as profiler
class DBHandlerException(Exception):
pass
class IntegrationDataNode(DataNode):
type = 'integration'
def __init__(self, integration_name, ds_type, integration_controller):
self.integration_name = integration_name
self.ds_type = ds_type
self.integration_controller = integration_controller
self.integration_handler = self.integration_controller.get_handler(self.integration_name)
def get_type(self):
return self.type
def get_tables(self):
response = self.integration_handler.get_tables()
if response.type == RESPONSE_TYPE.TABLE:
result_dict = response.data_frame.to_dict(orient='records')
result = []
for row in result_dict:
result.append(TablesRow.from_dict(row))
return result
else:
raise Exception(f"Can't get tables: {response.error_message}")
def has_table(self, tableName):
return True
def get_table_columns(self, tableName):
return []
def create_table(self, table_name: Identifier, result_set, is_replace=False, is_create=False):
# is_create - create table
# is_replace - drop table if exists
# is_create==False and is_replace==False: just insert
table_columns_meta = {}
table_columns = []
for col in result_set.columns:
# assume this is pandas type
column_type = Text
if isinstance(col.type, np_dtype):
if pd_types.is_integer_dtype(col.type):
column_type = Integer
elif pd_types.is_numeric_dtype(col.type):
column_type = Float
table_columns.append(
TableColumn(
name=col.alias,
type=column_type
)
)
table_columns_meta[col.alias] = column_type
if is_replace:
# drop
drop_ast = DropTables(
tables=[table_name],
if_exists=True
)
result = self.integration_handler.query(drop_ast)
if result.type == RESPONSE_TYPE.ERROR:
raise Exception(result.error_message)
is_create = True
if is_create:
create_table_ast = CreateTable(
name=table_name,
columns=table_columns,
is_replace=True
)
result = self.integration_handler.query(create_table_ast)
if result.type == RESPONSE_TYPE.ERROR:
raise Exception(result.error_message)
insert_columns = [Identifier(parts=[x.alias]) for x in result_set.columns]
formatted_data = []
for rec in result_set.get_records():
new_row = []
for col in result_set.columns:
value = rec[col.alias]
column_type = table_columns_meta[col.alias]
python_type = str
if column_type == Integer:
python_type = int
elif column_type == Float:
python_type = float
try:
value = python_type(value) if value is not None else value
except Exception:
pass
new_row.append(value)
formatted_data.append(new_row)
if len(formatted_data) == 0:
# not need to insert
return
insert_ast = Insert(
table=table_name,
columns=insert_columns,
values=formatted_data
)
try:
result = self.integration_handler.query(insert_ast)
except Exception as e:
msg = f'[{self.ds_type}/{self.integration_name}]: {str(e)}'
raise DBHandlerException(msg) from e
if result.type == RESPONSE_TYPE.ERROR:
raise Exception(result.error_message)
@profiler.profile()
def query(self, query=None, native_query=None, session=None):
try:
if query is not None:
result = self.integration_handler.query(query)
else:
# try to fetch native query
result = self.integration_handler.native_query(native_query)
except Exception as e:
msg = str(e).strip()
if msg == '':
msg = e.__class__.__name__
msg = f'[{self.ds_type}/{self.integration_name}]: {msg}'
raise DBHandlerException(msg) from e
if result.type == RESPONSE_TYPE.ERROR:
raise Exception(f'Error in {self.integration_name}: {result.error_message}')
if result.type == RESPONSE_TYPE.OK:
return [], []
df = result.data_frame
# region clearing df from NaN values
# recursion error appears in pandas 1.5.3 https://github.com/pandas-dev/pandas/pull/45749
if isinstance(df, pd.Series):
df = df.to_frame()
try:
df = df.replace(np.NaN, pd.NA)
except Exception as e:
print(f'Issue with clearing DF from NaN values: {e}')
try:
df = df.where(pd.notnull(df), None)
except Exception as e:
print(f'Issue with clearing DF from NaN values: {e}')
# endregion
columns_info = [
{
'name': k,
'type': v
}
for k, v in df.dtypes.items()
]
data = df.to_dict(orient='records')
return data, columns_info | PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/diag_scripts/autoassess/plot_autoassess_metrics.py | import logging
import os
import sys
import yaml
from esmvaltool.diag_scripts.autoassess._plot_mo_metrics import (
plot_nac,
read_model_metrics,
read_obs_metrics,
)
logger = logging.getLogger(__name__)
# Diagnostic that takes two datasets (control_model and exp_model
# and observational data (ERA-Interim and MERRA);
# plotting OBS is not yet supported; it will be, hold your horses
def get_cfg():
"""Read diagnostic script configuration from settings.yml."""
settings_file = sys.argv[1]
with open(settings_file) as file:
cfg = yaml.safe_load(file)
return cfg
def main():
"""Call the plotting script via command line."""
cfg = get_cfg()
logger.setLevel(cfg['log_level'].upper())
control_model = cfg['control_model']
exp_model = cfg['exp_model']
vsloc = exp_model + '_vs_' + control_model
file_exp = os.path.join(
os.path.dirname(os.path.dirname(cfg['plot_dir'])), cfg['diag_tag'],
cfg['diag_name'], vsloc, cfg['area'], exp_model, 'metrics.csv')
file_ref = os.path.join(
os.path.dirname(os.path.dirname(cfg['plot_dir'])), cfg['diag_tag'],
cfg['diag_name'], vsloc, cfg['area'], control_model, 'metrics.csv')
plot_title = ' '.join([cfg['area'], control_model, 'vs', exp_model])
# Read (and record) metrics files
# metrics = read_order_metrics(args.file_ord)
ref = read_model_metrics(file_ref)
tests = [read_model_metrics(file_exp)]
cfg['input_data'] = {'ref': {'filename': file_ref},
'exp': {'filename': file_exp}}
# var = read_model_metrics(args.file_var)
obs, acc = None, None
if 'additional_metrics' in cfg:
# choose the obs file to get the metrics from
file_obs = os.path.join(
os.path.dirname(os.path.dirname(cfg['plot_dir'])), cfg['diag_tag'],
cfg['diag_name'], vsloc, cfg['area'], cfg['error_metric'],
'metrics.csv')
(obs, acc) = read_obs_metrics(file_obs)
# Produce plot
plot_nac(
control_model, [exp_model],
ref,
tests,
metrics=None,
var=None,
obs=obs,
acc=acc,
extend_y=False,
title=plot_title,
ofile=cfg['plot_name'],
config=cfg)
if __name__ == '__main__':
logging.basicConfig(format="%(asctime)s [%(process)d] %(levelname)-8s "
"%(name)s,%(lineno)s\t%(message)s")
main() | PypiClean |
/Jupytils-0.41100000000000003.tar.gz/Jupytils-0.41100000000000003/Classification.py | import matplotlib.pyplot as plt
from numpy import *
from collections import Counter
import numpy as np
import pylab as pl
from matplotlib import colors
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets, cluster, preprocessing, decomposition, svm, datasets
from sklearn.svm import SVC
from sklearn.decomposition import PCA
import pandas as pd
import numpy.random as random
from mpl_toolkits.mplot3d import Axes3D
import glob
import os
import time
from IPython.display import display
from IPython.display import Image
import scipy;
from scipy import stats;
import sklearn;
import sklearn.ensemble;
import sklearn.neighbors
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics import *
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.multiclass import OneVsRestClassifier
import os
import subprocess
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import tree
from sklearn import datasets
from IPython.display import Image
#import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pydotplus
from sklearn.externals.six import StringIO
# -*- coding: utf-8 -*-
def run_cv(X, y, clf_class, printDebug=False, clf=None, **kwargs):
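    """Run 5-fold cross-validation on (X, y) and return the out-of-fold
    predictions together with the last fitted classifier. A ready-made
    classifier can be passed via `clf`; otherwise `clf_class(**kwargs)` is
    instantiated."""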
# Construct a kfolds object
kf = sklearn.model_selection.KFold(n_splits=5,shuffle=True)
y_pred = y.copy()
# Iterate through folds\
i = 0;
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train = y[train_index]
# Initialize a classifier with key word arguments
clf = clf_class(**kwargs) if (clf is None) else clf;
if (printDebug): print ("*",i, end ="");
clf.fit(X_train,y_train)
y_pred[test_index] = clf.predict(X_test)
i = i +1;
if (printDebug): print ("*");
return y_pred, clf
# -*- coding: utf-8 -*-
def run_cvTT(X,y,clf_class,printDebug = True , clf=None):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    return X_train, X_test, y_train, y_test
def accuracy(y_true,y_pred):
# NumPy interprets True and False as 1. and 0.
return np.mean(y_true == y_pred)
#
# Call:
# cms = [("Decision Tree", [[25,24],[23,22]])]
#
def draw_confusion_matrices(confusion_matricies,class_names):
class_names = class_names.tolist() if type(class_names) != list else class_names
fig = plt.figure(figsize = (20,5))
for i,cm in enumerate(confusion_matricies):
classifierName, matrix = cm[0], cm[1]
#cmstr = str(cm)
ax = fig.add_subplot(1,8,i+1)
plt.subplots_adjust(wspace = .4);
cax = ax.matshow(matrix, cmap='seismic', interpolation='nearest')
#plt.title('CM: %s' % classifier + "\n" + cmstr)
plt.title(classifierName)
plt.grid(None)
if (i ==0 ):
i=0;
#fig.colorbar(cax);
ax.set_xticklabels([''] + class_names)
ax.set_yticklabels([''] + class_names)
plt.xlabel('Predicted')
plt.ylabel('True')
for (ii, jj), z in np.ndenumerate(matrix):
ax.text(jj, ii, '{:0.1f}'.format(z), ha='center', va='center',
bbox=dict(facecolor='white', edgecolor='0.3'))
plt.show()
##
# Draw most 15 significant
#
def DrawFeatureImportance(dft,clf, title="", ax =None, m=15):
if ( not hasattr(clf,'feature_importances_') ):
print ("No Feature Importance matrix for this classifier:", clf)
return;
# Get Feature Importance from the classifier
feature_importance = clf.feature_importances_
# Normalize The Features
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
sorted_idx10 = sorted_idx[-m:]; #[0:5] # TODO: Print Top 10 only ??
pos = np.arange(sorted_idx10.shape[0]) + .5
fc10=np.asanyarray(dft.columns.tolist())[sorted_idx10];
if ( ax == None):
plt.figure(figsize=(5, 3));
plt.barh(pos, feature_importance[sorted_idx10], align='center', color='#7A68A6')
plt.yticks(pos, fc10)
plt.xlabel('Relative: '+ title)
plt.title('Variable Importance')
if ( ax == None): plt.show()
def DrawFeatureImportanceMatrix(df, clfs):
fig = plt.figure(figsize = (25,3))
plt.subplots_adjust(wspace = 1.2);
for i in range(int (len(clfs)/2)) :
classifierName, clf = clfs[i*2], clfs[i*2+1]
if ( not hasattr(clf,'feature_importances_') ):
continue;
#cmstr = str(cm)
#print classifierName;
plt.subplots_adjust(wspace = 1.4);
ax = fig.add_subplot(1,8,i+1)
DrawFeatureImportance(df, clf, classifierName, ax)
#plt.title(classifierName)
plt.show()
# This function takes a categorical (or any) column and either replaces it
# in place or creates a new column containing an integer enumeration of its values.
# Values in the left column are mapped as shown to the right array:
# For ex. [a,a,a,b,b,c,c,c,c,d, d] => [0,0,0,1,1,2,2,2,2,3, 3]
#
def encodeCategorical(df, columnName, newColumnName = None, makeCopy = False):
df_mod = df.copy() if makeCopy else df;
targets = df_mod[columnName].unique()
mapToInt = {name: n for n, name in enumerate(targets)}
newColumnName = newColumnName if (newColumnName!=None) else columnName;
df_mod[newColumnName] = df_mod[columnName].replace(mapToInt)
return (df_mod, targets, mapToInt)
##########################################################################
#
''' Usage:
iris = datasets.load_iris()
X = iris.data;
X = iris.data[:, :2]
y = iris.target
clf = neighbors.KNeighborsClassifier(15, weights='uniform')
PlotDecisionBoundary(X,y,clf)
clf = SVC(kernel="linear")
PlotDecisionBoundary(X,y,clf)
'''
#
def PlotDecisionBoundary(X,y,clf, Xtest = None, ytest = None, desc=None):
if (Xtest is None or ytest is None):
Xtest = X;
ytest = y;
cs1 = [c for c in colors.cnames if not c.find("dark")]
cs1.sort(reverse = True)
colorsBold = ListedColormap(cs1)
cs2 = [c for c in colors.cnames if not c.find("light")]
cs2.sort(reverse = True)
colorsLight = ListedColormap(cs2)
#--> We must normalize before applying PCA
pca= PCA(n_components= 2)
pca.fit(X)
nX = pca.transform(X)
#nX = X
# Choose Any classifier of your choice
clf.fit(nX, y)
x_min, x_max = nX[:, 0].min() - 1, nX[:, 0].max() + 1
y_min, y_max = nX[:, 1].min() - 1, nX[:, 1].max() + 1
#x_min = 0.0; x_max = 1.0
#y_min = 0.0; y_max = 1.0
h=0.02
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=colorsLight, alpha =.6)
labels = unique(y)
ki = 0;
for k in labels:
lX = Xtest[ytest==k]
plt.scatter(lX[:, 0], lX[:, 1], c=cs1[ki], s=30, label=k, alpha=1)
#plt.scatter(nX[:, 0], nX[:, 1], c=y, s=30, cmap=colorsBold, alpha=1, label=[0,1,2])
ki += 1
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
classifierName = str(type(clf)).split(".")[-1][:-2]
score = clf.score(Xtest,ytest)
title = ("%s, Score: %.2f"%(classifierName, score))
title = desc or title
plt.title(title)
plt.legend()
#Classification Problems
# Usually in case of classification, it is best to draw a scatter plot of the
# Target Variable
# df, t,m = encodeCategorical(dfL, "FiveTile1", "Target" );
# scatter_matrix(dfL, alpha=1, figsize=(10,10), s=100, c=df.Target);
# print "categorical Plot {}".format(m)
#
#
# Df - Data Frame that does not have a Predict Column
# y - Predict Column Series
def Classify(df, y,
printDebug = True ,
drawConfusionMatrix = True,
classifiers = None,
scale =True
):
if ( df is None or y is None):
raise Exception("No Data Given");
t = df.select_dtypes(exclude=[np.number])
if ( len(t.columns) > 0) :
raise Exception("nonnumeric columns? " + t.columns);
l = preprocessing.LabelEncoder()
class_names = y.unique()
y=l.fit_transform(y);
df.fillna(0, inplace=True)
    X = df.to_numpy().astype(float)
if (scale):
scaler = StandardScaler()
X = scaler.fit_transform(X)
print ("Feature space holds %d observations and %d features" % X.shape)
print ("Unique target labels:", class_names)
cls = [# Note: SVM takes long time to run - so commenting out
#"SVM" , sklearn.svm.SVC(),
"Random Forest" , sklearn.ensemble.RandomForestClassifier(),
#"K-NN" , sklearn.neighbors.KNeighborsClassifier(),
"DecisionTree Gini" , DecisionTreeClassifier(max_depth=4, criterion="gini"),
"DecisionTree Entr" , DecisionTreeClassifier(max_depth=4, criterion="entropy"),
"Gradient Boosting" , sklearn.ensemble.RandomForestClassifier(),
#"Logit Regression" , sklearn.linear_model.LogisticRegression()
];
if (not classifiers is None ):
cls = classifiers;
y_preds = {}
ret_accuracy = [];
cms = [];
clfs = {}
for i in arange( int (len(cls)/2) ):
nm = cls[i*2];
cl = cls[i*2 +1]
y_pred, clfi = run_cv(X,y, None, clf=cl, printDebug=printDebug)
y_preds[nm] = y_pred
clfs[nm] = clfi
ac = accuracy(y, y_pred);
cm = confusion_matrix(y, y_pred )
ret_accuracy.append( (nm, ac, cm) )
if (printDebug):
print ("%20s accuracy: %03f "% (nm, ac) );
#print('{}\n'.format(metrics.classification_report(y, y_pred)))
#print("%20s r^2 score: %03f"% (nm,sklearn.metrics.r2_score(y, y_pred, sample_weight=None, multioutput=None)))
print("%20s r^2 score: %03f"% (nm,sklearn.metrics.r2_score(y, y_pred, sample_weight=None)))
cms.append( (nm, cm) );
if (drawConfusionMatrix):
#print cms, class_names
draw_confusion_matrices(cms, class_names);
DrawFeatureImportanceMatrix(df, cls)
return (X, y, ret_accuracy,cls, y_preds,clfs);
def visualizeTree(dcls, feature_names, class_names= None):
dot_data = StringIO()
tree.export_graphviz(dcls, out_file=dot_data,
feature_names= feature_names,
class_names= class_names,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
display(Image(graph.create_png()))
return "";
##======================= DRAW Decision Trees here
def DrawDecisionTree(X,y, cls, class_names=None):
imgs=[]
if ( class_names is None):
class_names = y.unique().astype(str);
class_names.sort()
if (str(type(cls)).find('DecisionTreeClassifier') > 0 ):
visualizeTree(cls, X.columns, class_names=class_names)
else:
for k in range(int( len(cls)/2) ) :
dcls = cls[k*2+1];
if (str(type(dcls)).find('DecisionTreeClassifier') > 0):
visualizeTree(dcls, X.columns, class_names=class_names)
#======================== Get COde For DecisionTree
# Here is how to use this code
#if __name__ == '__main__':
# print("\n-- get data:")
# df = dfL;
#
# print("\n-- df.head():")
# print(df.head(), end="\n\n")
#
# features = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]
# df, targets,mi = encodeCategorical(df, "Name", "Target")
# y = df["Target"]
# X = df[features]
#
# dt = DecisionTreeClassifier(min_samples_split=20, random_state=99)
# dt.fit(X, y)
#
# print("\n-- get_code:")
## get_code(dt, features, targets)
#
# print("\n-- look back at original data using pandas")
# print("-- df[df['PetalLength'] <= 2.45]]['Name'].unique(): ",
# df[df['PetalLength'] <= 2.45]['Name'].unique(), end="\n\n")
#
# visualizeTree(dt, features)
def getCodeOfDecisionTree(tree, feature_names, target_names, spacer_base=" "):
"""Produce psuedo-code for decision tree.
Args
----
tree -- scikit-leant DescisionTree.
feature_names -- list of feature names.
target_names -- list of target (class) names.
spacer_base -- used for spacing code (default: " ").
Notes
-----
based on http://stackoverflow.com/a/30104792.
"""
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
features = [feature_names[i] for i in tree.tree_.feature]
value = tree.tree_.value
def recurse(left, right, threshold, features, node, depth):
spacer = spacer_base * depth
if (threshold[node] != -2):
print(spacer + "if ( " + features[node] + " <= " + \
str(threshold[node]) + " ) {")
if left[node] != -1:
recurse (left, right, threshold, features, left[node],
depth+1)
print(spacer + "}\n" + spacer +"else {")
if right[node] != -1:
recurse (left, right, threshold, features, right[node],
depth+1)
print(spacer + "}")
else:
target = value[node]
for i, v in zip(np.nonzero(target)[1], target[np.nonzero(target)]):
target_name = target_names[i]
target_count = int(v)
print(spacer + "return " + str(target_name) + " ( " + \
str(target_count) + " examples )")
recurse(left, right, threshold, features, 0, 0) | PypiClean |
/BottleOIDC-21.8.30.tar.gz/BottleOIDC-21.8.30/README.md |
## BottleOIDC - OIDC Service Provider for Bottle
**BottleOIDC** is an OpenID Connect module providing authentication and authorization for [Bottle web Framework](https://bottlepy.org) web apps.
**BottleOIDC** supports OIDC auto discovery to simplify configuration and deployment.
### Installing
```bash
# pip install BottleOIDC
```
This installs the necessary Python modules, including Bottle, BottleSessions, requests, and PyJWT.
### Using BottleOIDC
```python
from bottle import Bottle
from BottleSessions import BottleSessions
from BottleOIDC import BottleOIDC
from config import oidc_config
app = Bottle()
BottleSessions(app)
auth = BottleOIDC(app, config=oidc_config)
@app.route('/login')
@auth.require_login
def login():
return f'hello {auth.my_username}'
@app.route('/bob')
@auth.require_user('bob')
def bob():
    return 'You must be bob'
if __name__ == '__main__':
app.run()
```
#### Signature and Parameters
```python
auth = BottleOIDC(app, config)
```
**`app`** - the Bottle() application context object. **Required.**
**`config`** - a python `dict` of configuration parameters and options. **Required.**
### Configuration Options
**BottleOIDC** is configured by passing a python `dict` with the necessary parameters:
> Example Configuration
```python
oidc_config = {
"discovery_url": "https://login.microsoftonline.com/<tenentid>/V2.0/.well-known/openid-configuration",
"client_id": "1b170767-1234-5678-abcd-90ff90ff90ff",
"client_secret": "MYCLIENTsecret",
"client_scope": ["openid", "email", "profile", ],
"user_attr" : "email",
}
```
**`discovery_url`** - oidc auto discovery url of the IdP. **Required.**
**`client_id`** - oidc client identifier of the app registered with IdP. **Required.**
**`client_secret`** - oidc client secret for the app provided by the IdP. **Required.**
**`client_scope`** - a Python `list` of requested scopes. Default is `['openid', 'email', 'profile']`.
**`user_attr`** - attribute to set username. Default is `email`
**`logout_idp`** - on logout, initiate IdP logout process. Default is `False`.
#### BottleOIDC Object Properties
**`auth.is_authenticated`** - Is `True` if the current session is authenticated.
**`auth.my_username`** - Returns None if the user is not authenticated. Returns `user_attr` value from the Id token, or 'AuthenticatedUser' if the attribute was not available in the Id token.
**`auth.my_attrs`** - Returns dict of attrs returned in the Id token, or {} if not authenticated.
> Example using object properties:
```python
@app.route('/status')
def view():
if auth.is_authenticated:
return {
'user': auth.my_username,
'data': auth.my_attrs
}
else:
return 'You are not Authenticated.'
```
### BottleSaml methods
#### auth.initiate_login()
```python
return auth.initiate_login(next, force_reauth, userhint)
```
`initiate_login()` returns an OIDC authorization code grant redirect to the IdP that initiates login. Arguments:
**`next`** - URL to redirect after login completed. Optional.
**`force_reauth`** - `True` requests IdP to require full reauth for this login. Default `False`
**`userhint`** - (where possible) provides the IdP with a username hint. Default `None`
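> Example: starting a login and returning to a post-login page (the `/profile` URL here is only illustrative):
```python
@app.route('/start-login')
def start_login():
    return auth.initiate_login(next='/profile')
```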
#### auth.initiate_logout()
```python
return auth.initiate_logout(next)
```
`initiate_logout()` clears the Session data to log the user out locally. (To log out from the IdP as well, set the **`logout_idp`** config option to `True`.)
**`next`** - URL to redirect after logout completed. Default is '/', *Optional.*
```python
@app.route('/logout')
def logout():
return auth.initiate_logout()
```
#### @auth.login_required
```python
@auth.login_required
def view():
return 'logged in'
```
Decorates a function to initiate login if the session is not authenticated. On successful authentication the browser will be redirected to the view.
#### @auth.add_login_hook
```python
@oidc.add_login_hook
def hook(username, attrs):
return username, attrs
```
Decorates a function that runs after OIDC authentication has completed and tokens have been retrieved.
Login hooks can process and filter username and Id token attributes before the data is stored in the session. Hooks are run in the order they are added.
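> Example: a hook that normalizes the username and keeps only a few attributes. The attribute names filtered here are illustrative; use whatever your IdP actually returns:
```python
@auth.add_login_hook
def filter_attrs(username, attrs):
    username = username.lower()
    attrs = {k: v for k, v in attrs.items() if k in ('email', 'name', 'groups')}
    return username, attrs
```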
#### @auth.require_user
```python
@auth.require_user(['bob', 'alice'])
def view():
return 'only bob or alice can get here'
```
Decorator that adds an authorization requirement to a view. If the session's `username` is in the list, the view is reached and processed; otherwise a `403 Unauthorized` error is returned.
#### @auth.require_attr(attr, value)
```python
@auth.require_attr(attr='groups', value=['sysadmin', 'netadmin'])
def view():
return 'you are in sysadmin or netadmin'
```
Decorator that adds an authorization requirement to a view. If the session has the desired attribute (in the Id token) and it matches one of the listed values, the view is reached and processed; otherwise a `403 Unauthorized` error is returned.
| PypiClean |
/ModelSEEDpy-0.3.0.tar.gz/ModelSEEDpy-0.3.0/modelseedpy/community/mscompatibility.py | from collections import OrderedDict
from cobra.core.metabolite import Metabolite
from cobra.io.json import save_json_model
from zipfile import ZipFile, ZIP_LZMA
from warnings import warn
from pprint import pprint
import json, lzma, re, os
class MSCompatibility:
def __init__(
self,
modelseed_db_path: str, # the local path to the ModelSEEDDatabase repository
printing=True, # specifies whether results are printed
):
self.printing = printing
# import and parse ModelSEED Database reactions and compounds
with open(
os.path.join(modelseed_db_path, "Biochemistry", "reactions.json"), "r"
) as rxns:
self.reactions = json.load(rxns)
self.reaction_ids = OrderedDict()
for rxn in self.reactions:
self.reaction_ids[rxn["id"]] = rxn["name"]
with open(
os.path.join(modelseed_db_path, "Biochemistry", "compounds.json"), "r"
) as rxns:
self.compounds = json.load(rxns)
self.compounds_cross_references, self.compound_names = (
OrderedDict(),
OrderedDict(),
)
for cpd in self.compounds:
self.compounds_cross_references[cpd["id"]] = {}
if cpd["aliases"] is not None:
for category in cpd["aliases"]:
content = category.split(";")
if "Name" in category:
content[0] = content[0].split(":")[0].strip()
names = [name.strip() for name in content]
names.append(cpd["name"])
for name in names:
if name not in self.compound_names:
self.compound_names[name] = cpd["id"]
else:
first = content[0].split(":")
db = first[0].strip()
content[0] = first[1]
self.compounds_cross_references[cpd["id"]][db] = [
x.strip() for x in content
]
# def _parse_modelReactionReagents(self, modelReactionReagents, model_metabolites):
# rxn_dict = {}
# for cpd in modelReactionReagents:
# met = re.search('(?<=id\/)(.+)', cpd['modelcompound_ref']).group()
# stoich = float(cpd['coefficient'])
# if met in model_metabolites:
# met = model_metabolites[met]
# elif re.sub('_\w\d', '', met) in model_metabolites:
# met = model_metabolites[re.sub('_\w\d', '', met)]
# else:
# KeyError(f'ModelSEEDError: The metabolite {met} in the reactions is not in the modelreactions.')
# rxn_dict[met] = stoich
# return rxn_dict
def standardize(
self,
models, # the collection of cobrakbase models that will be compared
metabolites: bool = True, # specifies whether metabolites or reactions (FALSE) will be standardized
exchanges: bool = True, # specifies whether only the exchange reaction will be standardized
conflicts_file_name: str = None, # the metabolite conflicts are stored and organized, where None does not export
model_names: list = None, # specifies the export names of the models
model_format: str = "json", # specifies to which format the model will be exported
export_directory: str = None, # specifies the directory to which all of the content will be exported
):
self.models = models
self.unique_mets, self.met_conflicts = OrderedDict(), OrderedDict()
self.unknown_met_ids, self.changed_metabolites, self.changed_reactions = (
[],
[],
[],
)
self.changed_ids_count = self.changed_rxn_count = 0
for self.model_index, self.model in enumerate(self.models):
# standardize metabolites
if metabolites:
if exchanges:
model_metabolites = [met.id for met in self.model.metabolites]
for ex_rxn in self.model.exchanges:
for met in ex_rxn.metabolites:
met, new_met_id, success = self._fix_met(met)
try:
ex_rxn.id = "EX_" + met.id
except:
ex_rxn.id = "EX_" + new_met_id
if (
"cpd" not in met.id
and success
and new_met_id not in model_metabolites
):
self.unknown_met_ids.append(met.id)
warn(
f"CodeError: The metabolite {met.id} | {met.name} was not corrected to a ModelSEED metabolite."
)
else:
for met in self.model.metabolites:
met, new_met_id, success = self._fix_met(met)
if "cpd" not in met.id:
self.unknown_met_ids.append(met.id)
warn(
f"CodeError: The metabolite {met.id} | {met.name} was not corrected to a ModelSEED metabolite."
)
if conflicts_file_name is not None:
self._export(
{
"metabolite_changes": self.changed_metabolites,
"reaction_changes": self.changed_reactions,
},
conflicts_file_name,
model_names,
model_format,
export_directory,
)
# standardize reactions
# else: #!!! The modelreactions appear to be incorrect
# modelreactions_ids = {re.sub('(_\w\d$)', '', rxn['id']).removeprefix('R-'):rxn for rxn in model.modelreactions}
# with open(os.path.join(export_directory, 'modelreactions.json'), 'w') as out:
# json.dump(modelreactions_ids, out, indent = 3)
# model_metabolites = {met.id:met for met in model.metabolites}
# missed_reactions = 0
# for rxn in model.reactions:
# if 'EX_' in rxn.id:
# continue
# original_reaction = rxn.reaction
# rxn.add_metabolites({rxn_met:0 for rxn_met in rxn.metabolites}, combine = False)
# if re.sub('(_\w\d$)', '', rxn.id) in modelreactions_ids:
# reaction_dict = self._parse_modelReactionReagents(
# modelreactions_ids[re.sub('(_\w\d$)', '', rxn.id)]['modelReactionReagents'], model_metabolites
# )
# elif rxn.id in modelreactions_ids:
# reaction_dict = self._parse_modelReactionReagents(
# modelreactions_ids[rxn.id]['modelReactionReagents'], model_metabolites
# )
# else:
# warn(f'ModelSEEDError: The reaction ID {rxn.id} is not captured by the modelreactions.')
# try:
# rxn.add_metabolites(reaction_dict, combine = False)
# except:
# new_reaction_dict = {}
# for met, content in reaction_dict.items():
# if isinstance(met, str):
# met = re.sub('_\w\d', '', met)
# else:
# if re.sub('_\w\d', '', met.id) not in model.metabolites:
# met.id = re.sub('_\w\d', '', met.id)
# new_reaction_dict[met] = content
# reaction_dict = new_reaction_dict
# if rxn.id not in self.reaction_ids:
# missed_reactions += 1
# # warn(f'ModelSEEDError: The {rxn.id} | {rxn.name} reaction is not recognized by the ModelSEED Database')
# # describe the change
# if original_reaction != rxn.reaction:
# change = {
# 'original': {
# 'reaction': original_reaction
# },
# 'new': {
# 'reaction': rxn.reaction
# },
# 'explanation': f'The reaction {rxn.id} was reconstructed from the ModelSEED Database.'
# }
# self.changed_reactions.append(change)
# if export_directory is not None:
# with open(os.path.join(export_directory, 'standardized_reactions.txt'), 'w') as out:
# json.dump(self.changed_reactions, out, indent = 3)
# total_reactions = 0
# for model in models:
# total_reactions += len(model.reactions)
# warn(f'\nModelSEEDError: {missed_reactions}/{total_reactions} reactions were not captured by the ModelSEED modelreaction IDs.')
self.models[self.model_index] = self.model
print(
f"\n\n{self.changed_rxn_count} reactions were substituted and {self.changed_ids_count} metabolite IDs were redefined."
)
return self.models
def align_exchanges(
self,
models, # the collection of cobrakbase models that will be compared
standardize: bool = False, # standardize the model names and reactions to the ModelSEED Database
conflicts_file_name: str = None, # the metabolite conflicts are stored and organized, where None does not the conflicts
model_names: list = None, # specifies the name of the exported model, where None does not export the models
model_format: str = "json", # specifies to which format the model will be exported
export_directory: str = None, # specifies the directory to which all of the content will be exported
):
self.models = models
self.changed_ids_count = self.changed_rxn_count = 0
if standardize:
            self.standardize(self.models)
(
unique_names,
established_mets,
self.unknown_met_ids,
self.changed_metabolites,
self.changed_reactions,
) = ([], [], [], [], [])
self.unique_mets, self.met_conflicts = OrderedDict(), OrderedDict()
for self.model_index, self.model in enumerate(self.models):
model_metabolites = {met.id: met for met in self.model.metabolites}
for ex_rxn in self.model.exchanges:
for met in ex_rxn.metabolites:
met_name = re.sub("_\w\d$", "", met.name)
if (
met.id not in self.unique_mets
and met.id not in established_mets
):
if met_name not in unique_names:
# identify the unique metabolite
self.unique_mets[met.id] = {
f"model{self.model_index}_id": met.id,
f"model{self.model_index}_met": met,
}
unique_names.append(met_name)
else:
# describe the metabolite conflict between the ID and name
former_id = list(self.unique_mets.keys())[
unique_names.index(met_name)
]
former_model_index = (
list(self.unique_mets[former_id].keys())[0]
.split("_")[0]
.removeprefix("model")
)
if met.name not in self.met_conflicts:
self.met_conflicts[met_name] = {
f"model{former_model_index}_id": former_id,
f"model{former_model_index}_met": self.unique_mets[
former_id
][f"model{former_model_index}_met"],
f"model{self.model_index}_id": met.id,
f"model{self.model_index}_met": met,
}
else:
self.met_conflicts[met_name].update(
{
f"model{self.model_index}_id": met.id,
f"model{self.model_index}_met": met,
}
)
met, new_met_id, success = self._fix_met(met)
else:
former_name = unique_names[
list(self.unique_mets.keys()).index(met.id)
]
former_model_index = (
list(self.unique_mets[met.id].keys())[0]
.split("_")[0]
.removeprefix("model")
)
if met_name == former_name:
# remove the metabolite that is no longer unique
del unique_names[
list(self.unique_mets.keys()).index(met.id)
]
self.unique_mets.pop(met.id)
established_mets.append(met.id)
else:
# describe the conflicting metabolite names
if met.id not in self.met_conflicts:
self.met_conflicts[met.id] = {
f"model{former_model_index}_name": former_name,
f"model{former_model_index}_met": self.unique_mets[
former_id
][f"model{former_model_index}_met"],
f"model{self.model_index}_name": met.name,
f"model{self.model_index}_met": met,
}
else:
if (
f"model{self.model_index}_name"
not in self.met_conflicts[met.id]
):
self.met_conflicts[met.id].update(
{
f"model{self.model_index}_name": met.name,
f"model{self.model_index}_met": met,
}
)
else:
iteration = 0
while (
f"model{self.model_index}_{iteration}_name"
in self.met_conflicts[met.id]
):
iteration += 1
self.met_conflicts[met.id].update(
{
f"model{self.model_index}_{iteration}_name": met.name,
f"model{self.model_index}_{iteration}_met": met,
}
)
met, new_met_id, success = self._fix_met(met)
self.models[self.model_index] = self.model
# correct the reaction ID
if (
re.sub("(_\w\d$)", "", ex_rxn.id).removeprefix("EX_")
in model_metabolites
):
suffix = re.search("(_\w\d$)", ex_rxn.id).group()
rxn_met, new_met_id, success = self._fix_met(
re.sub("(_\w\d$)", "", ex_rxn.id).removeprefix("EX_")
)
ex_rxn.id = "EX_" + new_met_id + suffix
if conflicts_file_name:
export_met_conflicts = {}
for met_id, content in self.met_conflicts.items():
export_met_conflicts[met_id] = {}
for key, val in content.items():
if "_met" not in key:
export_met_conflicts[met_id][key] = val
else:
export_met_conflicts[met_id][
key.replace("_met", "_formula")
] = val.formula
self._export(
export_met_conflicts,
conflicts_file_name,
model_names,
model_format,
export_directory,
)
print(
f"\n\n{self.changed_rxn_count} exchange reactions were substituted and {self.changed_ids_count} exchange metabolite IDs were redefined."
)
return self.models
def _fix_met(self, met):
# correct the conflict
base_name = "".join(met.name.split("-")[1:]).capitalize()
met_name = re.sub("_\w\d$", "", met.name)
new_met_id = met.id
success = True
if met.name in self.compound_names:
met, new_met_id = self.__correct_met(met, met.name)
elif met.name.capitalize() in self.compound_names:
met, new_met_id = self.__correct_met(met, met.name.capitalize())
elif met_name in self.compound_names:
met, new_met_id = self.__correct_met(met, met_name)
elif met_name.capitalize() in self.compound_names:
met, new_met_id = self.__correct_met(met, met_name.capitalize())
elif base_name in self.compound_names and base_name != "":
met, new_met_id = self.__correct_met(met, base_name)
else:
self.unknown_met_ids.append(met.id)
success = False
warn(
f'ModelSEEDError: The metabolite ({" | ".join([x for x in [met.id, met.name, base_name, met_name] if x != ""])}) is not recognized by the ModelSEED Database'
)
return met, new_met_id, success
def _export(
self,
conflicts, # the conflicts dictionary that will be exported
conflicts_file_name, # the metabolite conflicts are stored and organized, where None does not the conflicts
model_names, # specifies the name of the exported model, where None does not export the models
model_format, # specifies to which format the model will be exported
export_directory, # specifies the directory to which all of the content will be exported
):
if export_directory is None:
export_directory = os.getcwd()
file_paths = []
if conflicts_file_name is not None:
path = os.path.join(export_directory, conflicts_file_name)
file_paths.append(os.path.relpath(path, export_directory))
with open(path, "w") as out:
json.dump(conflicts, out, indent=3)
if model_names is not None:
for index, model in enumerate(self.models):
path = os.path.join(
export_directory, f"{model_names[index]}.{model_format}"
)
file_paths.append(os.path.relpath(path, export_directory))
save_json_model(model, path)
with ZipFile(
"_".join(model_names[:4]) + ".zip", "w", compression=ZIP_LZMA
) as zip:
for file in file_paths:
zip.write(file)
os.remove(file)
def __correct_met(self, met, met_name, standardize=False):
        def check_cross_references(met, general_met):
            match = False  # stays False if no cross-reference overlap is found
            for db in self.compounds_cross_references[general_met]:
for cross_ref in self.compounds_cross_references[general_met][db]:
if (
cross_ref
in self.compounds_cross_references[
self.compound_names[met_name]
][db]
):
match = True
break
if match:
break
return match, db
original_id = new_met_id = met.id
compartment = re.search("(_\w\d$)", met.id).group()
if (
met.id.removesuffix(compartment) != self.compound_names[met_name]
): # If the ID associated with the name deviates from that in the ModelSEED Database
new_met_id = self.compound_names[met_name] + compartment
if new_met_id in met.model.metabolites:
# replace the undesirable isomer in every instance, since it cannot be renamed
for rxn in met.reactions:
double_reagent = False
original_reaction = rxn.reaction
removal_dict, reaction_dict = {}, {}
for rxn_met in (
rxn.reactants + rxn.products
): # The REACTANTS+PRODUCTS may resolve metabolites that are both, more than the METABOLITES attribute
match = False
stoich = float(rxn.metabolites[rxn_met])
compartment = re.search("(_\w\d$)", rxn_met.id).group()
new_met = rxn_met
if rxn_met.id == met.id:
if new_met_id in [
old_met.id for old_met in rxn.metabolites
]:
double_reagent = True
warn(
f"CodeError: The metabolite {new_met_id} replacement for {met.id} already exists in the reaction {rxn.id}, thus the reaction cannot be updated."
)
break
# affirm the match with cross-references, where it is possible for ModelSEED compounds
general_met = re.sub("(_\w\d$)", "", met.id)
if (
"cpd" in met.id
and self.compounds_cross_references[general_met] != {}
):
match, db = check_cross_references(met, general_met)
if not match:
warn(
f"ModelSEEDError: The old metabolite {met.id} cross-references ({self.compounds_cross_references[general_met]}) do not overlap with those ({self.compounds_cross_references[self.compound_names[met_name]]}) of the new metabolite {new_met_id}."
)
# remove duplicate exchange reaction
if "EX_" in rxn.id and "EX_" + new_met_id in [
ex_rxn.id for ex_rxn in self.model.exchanges
]:
change = {
"original": {"reaction": original_reaction},
"new": {"reaction": None},
"justification": f"A {new_met_id} exchange reaction already exists in model {self.model_index}, thus this duplicative exchange reaction ({rxn.id}) is deleted.",
}
if match:
change[
"justification"
] += f" The ID match was verified with {db} cross-references."
self.model.remove_reactions([rxn.id])
self.changed_reactions.append(change)
if self.printing:
print("\n")
pprint(change, sort_dicts=False)
self.changed_rxn_count += 1
double_reagent = True
break
# define the metabolite with the new name
new_met = Metabolite(
id=new_met_id,
name=met_name,
formula=met.formula,
charge=met.charge,
compartment=met.compartment,
)
removal_dict[rxn_met] = 0
reaction_dict[new_met] = stoich
# reconstruct the reactions
if double_reagent:
continue
new_reactants = 0
for key, val in reaction_dict.items():
new_reactants += 1 if val < 0 else 0
new_products = len(reaction_dict) - new_reactants
num_reactants, num_products = len(rxn.reactants), len(rxn.products)
if num_reactants == new_reactants and num_products == new_products:
rxn.add_metabolites(removal_dict, combine=False)
rxn.add_metabolites(reaction_dict, combine=False)
change = {
"original": {"reaction": original_reaction},
"new": {"reaction": rxn.reaction},
"justification": f"The {new_met_id} replacement for {met.id} already exists in model {self.model_index}, so each reaction (here {rxn.id}) must be updated.",
}
if match:
change[
"justification"
] += f" The ID match was verified with {db} cross-references."
self.changed_reactions.append(change)
if self.printing:
print("\n")
pprint(change, sort_dicts=False)
self.changed_rxn_count += 1
else:
warn(
f"CodeError: The reaction {reaction_dict} | {new_reactants} {new_products} possesses a different number of reagents than the original reaction {original_reaction} | {num_reactants} {num_products}, and is skipped."
)
else:
# affirm the match with cross-references, where it is possible for ModelSEED compounds
match = False
general_met = re.sub("(_\w\d$)", "", met.id)
if (
"cpd" in met.id
and self.compounds_cross_references[general_met] != {}
):
match, db = check_cross_references(met, general_met)
if not match:
warn(
f"ModelSEEDError: The old metabolite {met.id} cross-references ({self.compounds_cross_references[general_met]}) do not overlap with those ({self.compounds_cross_references[self.compound_names[met_name]]}) of the new metabolite {new_met_id}."
)
# rename the undesirable isomer
met.id = self.compound_names[met_name] + compartment
change = {
"original": {"id": original_id, "name": met.name},
"new": {"id": met.id, "name": met_name + compartment},
"justification": f"The {original_id} and {met.id} distinction in {self.model_index} is incompatible.",
}
if "cpd" not in original_id:
change[
"justification"
] = f"The {original_id} ID is not a ModelSEED Database ID."
if standardize:
change[
"justification"
] = f"The {original_id} and {met.id} metabolites were matched via their name."
if match:
change[
"justification"
] += f" The ID match was verified with {db} cross-references."
self.changed_metabolites.append(change)
if self.printing:
print("\n")
pprint(change, sort_dicts=False)
self.changed_ids_count += 1
return met, new_met_id | PypiClean |
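# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the database path,
# model files and export names below are assumptions for illustration only.
# MSCompatibility expects a local clone of the ModelSEEDDatabase repository
# and COBRA/cobrakbase model objects.
if __name__ == "__main__":
    from cobra.io import read_sbml_model  # assumed route for loading the models

    demo_models = [read_sbml_model("member1.xml"), read_sbml_model("member2.xml")]
    msc = MSCompatibility("/path/to/ModelSEEDDatabase", printing=True)
    # map metabolite IDs onto ModelSEED nomenclature and export the conflicts
    demo_models = msc.standardize(demo_models, conflicts_file_name="conflicts.json",
                                  model_names=["member1", "member2"])
    # then reconcile exchange metabolites/reactions across the community members
    demo_models = msc.align_exchanges(demo_models, conflicts_file_name="exchange_conflicts.json",
                                      model_names=["member1", "member2"])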
/Coopr-4.0.9597.tar.gz/Coopr-4.0.9597/README.txt | ============
Coopr README
============
The Coopr project provides a wrapper to Pyomo.
-------
License
-------
BSD. See the LICENSE.txt file.
------------
Organization
------------
+ Directories
* coopr - The root directory for Coopr source code
+ Documentation and Bug Tracking
* Trac wiki: https://software.sandia.gov/trac/pyomo
+ Authors
* See the AUTHORS.txt file.
+ Project Managers
* William E. Hart, [email protected]
+ Mailing List
* [email protected]
- The main list for help and announcements
* [email protected]
- Where developers of Coopr discuss new features
--------------------
Third Party Software
--------------------
None.
| PypiClean |
/NehorayRapid-0.0.1-py3-none-any.whl/mmedit/models/losses/perceptual_loss.py | import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint
from torch.nn import functional as F
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class PerceptualVGG(nn.Module):
"""VGG network used in calculating perceptual loss.
In this implementation, we allow users to choose whether use normalization
in the input feature and the type of vgg network. Note that the pretrained
path must fit the vgg type.
Args:
layer_name_list (list[str]): According to the name in this list,
forward function will return the corresponding features. This
list contains the name each layer in `vgg.feature`. An example
of this list is ['4', '10'].
vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image.
            Importantly, the input feature must be in the range [0, 1].
Default: True.
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
"""
def __init__(self,
layer_name_list,
vgg_type='vgg19',
use_input_norm=True,
pretrained='torchvision://vgg19'):
super().__init__()
if pretrained.startswith('torchvision://'):
assert vgg_type in pretrained
self.layer_name_list = layer_name_list
self.use_input_norm = use_input_norm
# get vgg model and load pretrained vgg weight
# remove _vgg from attributes to avoid `find_unused_parameters` bug
_vgg = getattr(vgg, vgg_type)()
self.init_weights(_vgg, pretrained)
num_layers = max(map(int, layer_name_list)) + 1
assert len(_vgg.features) >= num_layers
# only borrow layers that will be used from _vgg to avoid unused params
self.vgg_layers = _vgg.features[:num_layers]
if self.use_input_norm:
# the mean is for image with range [0, 1]
self.register_buffer(
'mean',
torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
self.register_buffer(
'std',
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
for v in self.vgg_layers.parameters():
v.requires_grad = False
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.use_input_norm:
x = (x - self.mean) / self.std
output = {}
for name, module in self.vgg_layers.named_children():
x = module(x)
if name in self.layer_name_list:
output[name] = x.clone()
return output
def init_weights(self, model, pretrained):
"""Init weights.
Args:
model (nn.Module): Models to be inited.
pretrained (str): Path for pretrained weights.
"""
logger = get_root_logger()
load_checkpoint(model, pretrained, logger=logger)
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
"""Perceptual loss with commonly used style loss.
Args:
        layer_weights (dict): The weight for each layer of vgg feature.
            Here is an example: {'4': 1., '9': 1., '18': 1.}, which means the
            5th, 10th and 19th feature layers will be extracted with weight 1.0
            when calculating losses.
vgg_type (str): The type of vgg network used as feature extractor.
Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
loss will be calculated and the loss will multiplied by the
weight. Default: 1.0.
style_weight (float): If `style_weight > 0`, the style loss will be
calculated and the loss will multiplied by the weight.
Default: 1.0.
        norm_img (bool): If True, the image will be normed to [0, 1]. Note that
            this is different from `use_input_norm`, which normalizes the input
            inside the forward function of vgg according to the dataset statistics.
Importantly, the input image must be in range [-1, 1].
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'.
criterion (str): Criterion type. Options are 'l1' and 'mse'.
Default: 'l1'.
"""
def __init__(self,
layer_weights,
vgg_type='vgg19',
use_input_norm=True,
perceptual_weight=1.0,
style_weight=1.0,
norm_img=True,
pretrained='torchvision://vgg19',
criterion='l1'):
super().__init__()
self.norm_img = norm_img
self.perceptual_weight = perceptual_weight
self.style_weight = style_weight
self.layer_weights = layer_weights
self.vgg = PerceptualVGG(
layer_name_list=list(layer_weights.keys()),
vgg_type=vgg_type,
use_input_norm=use_input_norm,
pretrained=pretrained)
criterion = criterion.lower()
if criterion == 'l1':
self.criterion = torch.nn.L1Loss()
elif criterion == 'mse':
self.criterion = torch.nn.MSELoss()
else:
raise NotImplementedError(
f'{criterion} criterion has not been supported in'
' this version.')
def forward(self, x, gt):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.norm_img:
x = (x + 1.) * 0.5
gt = (gt + 1.) * 0.5
# extract vgg features
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())
# calculate perceptual loss
if self.perceptual_weight > 0:
percep_loss = 0
for k in x_features.keys():
percep_loss += self.criterion(
x_features[k], gt_features[k]) * self.layer_weights[k]
percep_loss *= self.perceptual_weight
else:
percep_loss = None
# calculate style loss
if self.style_weight > 0:
style_loss = 0
for k in x_features.keys():
style_loss += self.criterion(
self._gram_mat(x_features[k]),
self._gram_mat(gt_features[k])) * self.layer_weights[k]
style_loss *= self.style_weight
else:
style_loss = None
return percep_loss, style_loss
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
(n, c, h, w) = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
@LOSSES.register_module()
class TransferalPerceptualLoss(nn.Module):
"""Transferal perceptual loss.
Args:
loss_weight (float): Loss weight. Default: 1.0.
use_attention (bool): If True, use soft-attention tensor. Default: True
criterion (str): Criterion type. Options are 'l1' and 'mse'.
Default: 'l1'.
"""
def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
super().__init__()
self.use_attention = use_attention
self.loss_weight = loss_weight
criterion = criterion.lower()
if criterion == 'l1':
self.loss_function = torch.nn.L1Loss()
elif criterion == 'mse':
self.loss_function = torch.nn.MSELoss()
else:
raise ValueError(
f"criterion should be 'l1' or 'mse', but got {criterion}")
def forward(self, maps, soft_attention, textures):
"""Forward function.
Args:
maps (Tuple[Tensor]): Input tensors.
soft_attention (Tensor): Soft-attention tensor.
textures (Tuple[Tensor]): Ground-truth tensors.
Returns:
Tensor: Forward results.
"""
if self.use_attention:
h, w = soft_attention.shape[-2:]
softs = [torch.sigmoid(soft_attention)]
for i in range(1, len(maps)):
softs.append(
F.interpolate(
soft_attention,
size=(h * pow(2, i), w * pow(2, i)),
mode='bicubic',
align_corners=False))
else:
softs = [1., 1., 1.]
loss_texture = 0
for map, soft, texture in zip(maps, softs, textures):
loss_texture += self.loss_function(map * soft, texture * soft)
return loss_texture * self.loss_weight | PypiClean |
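# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): layer indices and
# weights are illustrative, and downloading the 'torchvision://vgg19' weights
# is assumed to work in the surrounding mmcv/mmedit environment.
if __name__ == '__main__':
    perceptual = PerceptualLoss(
        layer_weights={'4': 1.0, '9': 1.0, '18': 1.0},
        perceptual_weight=1.0,
        style_weight=0.)  # style term disabled, so the second return value is None
    pred = torch.rand(1, 3, 64, 64) * 2 - 1    # inputs in [-1, 1] since norm_img=True
    target = torch.rand(1, 3, 64, 64) * 2 - 1
    percep_loss, style_loss = perceptual(pred, target)
    print(percep_loss, style_loss)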
/Flask-Comment-0.1.0.tar.gz/Flask-Comment-0.1.0/src/flask_comment/__init__.py | import sys
import typing as t
from flask import Blueprint
from flask import current_app
from flask import Flask
from flask import Markup
from flask import request
# sanity checking
try:
assert sys.version_info >= (3, 6, 0)
except AssertionError: # pragma: no cover
    raise RuntimeError('Flask-Comment requires Python 3.6+!')
__version__ = '0.1.0'
DISQUS = 'disqus'
CUSDIS = 'cusdis'
VALINE = 'valine'
UTTERANCES = 'utterances'
GITALK = 'gitalk'
class Comment:
def __init__(
self,
app: t.Optional[Flask] = None,
platform: str = DISQUS,
) -> None:
if app is not None: # pragma: no cover
self.init_app(app, platform=platform)
def init_app(
self,
app: Flask,
platform: str = DISQUS,
) -> None:
if not hasattr(app, 'extensions'): # pragma: no cover
app.extensions = {}
app.extensions['comment'] = self
blueprint = Blueprint('comment', __name__)
app.register_blueprint(blueprint)
app.jinja_env.globals['comment'] = self
app.config.setdefault('COMMENT_PLATFORM', platform)
def load(
self,
page_url: t.Optional[str] = None,
page_indentifier: t.Optional[str] = None,
) -> Markup:
"""
Load comment component resources.
Examples:
```py
from flask import Flask
from flask_comment import Comment
app = Flask(__name__)
comment = Comment(app)
```
Arguments:
page_url: The [page url](https://help.disqus.com/en/articles/1717084-javascript-configuration-variables) for Disqus,
default to [`flask.request.base_url`](https://flask.palletsprojects.com/en/latest/api/?highlight=base_url#flask.Request.base_url).
page_indentifier: The [page indentifier](https://help.disqus.com/en/articles/1717084-javascript-configuration-variables) for Disqus,
default to [`flask.request.path`](https://flask.palletsprojects.com/en/latest/api/?highlight=request%20path#flask.Request.path).
"""
if current_app.config['COMMENT_PLATFORM'] == DISQUS:
return self._load_disqus(page_url, page_indentifier)
if current_app.config['COMMENT_PLATFORM'] == CUSDIS:
return self._load_cusdis()
if current_app.config['COMMENT_PLATFORM'] == VALINE:
return self._load_valine()
if current_app.config['COMMENT_PLATFORM'] == UTTERANCES:
return self._load_utterances()
if current_app.config['COMMENT_PLATFORM'] == GITALK: # pragma: no cover
return self._load_gitalk()
@staticmethod
def _load_disqus(
page_url: t.Optional[str] = None,
page_indentifier: t.Optional[str] = None,
) -> Markup:
"""
Load Disqus resources.
Reference: https://disqus.com/
"""
page_url = page_url or request.base_url
page_indentifier = page_indentifier or request.path
short_name = current_app.config['COMMENT_DISQUS_SHORTNAME']
return Markup(
"""<div id='disqus_thread'></div>
<script>
var disqus_config = function () {{
this.page.url = '{}';
this.page.identifier = '{}';
}};
(function() {{ // DON'T EDIT BELOW THIS LINE
var d = document, s = d.createElement('script');
s.src = 'https://{}.disqus.com/embed.js';
s.setAttribute('data-timestamp', +new Date());
(d.head || d.body).appendChild(s);
}})();
</script>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript">
comments powered by Disqus.</a></noscript>""".format(
page_url, page_indentifier, short_name
)
)
@staticmethod
def _load_cusdis() -> Markup:
"""
Load Cusdis resources.
Reference: https://cusdis.com/
"""
app_id = current_app.config['COMMENT_CUSDIS_APP_ID']
page_id = request.path
return Markup(
"""<div id="cusdis_thread"
data-host="https://cusdis.com"
data-app-id="{}"
data-page-id="{}"
data-page-url="{}"
data-page-title=""
>
<script>
(function(){{
const cusdis = document.getElementById('cusdis_thread')
cusdis.setAttribute('data-page-title', document.title)
}})()
</script>
<script async src="https://cusdis.com/js/cusdis.es.js"></script>
""".format(
app_id, page_id, page_id
)
)
@staticmethod
def _load_valine() -> Markup:
"""
Load Valine resources.
Reference: https://valine.js.org/
"""
app_id = current_app.config['COMMENT_VALINE_APP_ID']
app_key = current_app.config['COMMENT_VALINE_APP_KEY']
return Markup(
"""<script src='https://unpkg.com/valine/dist/Valine.min.js'></script>
<div id='vcomments'></div>
<script>
new Valine({{
'el': '#vcomments',
'appId': '{}',
'appKey': '{}',
}})
</script>""".format(
app_id, app_key
)
)
@staticmethod
def _load_utterances() -> Markup:
"""
Load Utterances resources.
Reference: https://utteranc.es/
"""
repo = current_app.config['COMMENT_UTTERANCES_REPO']
return Markup(
"""<script src="https://utteranc.es/client.js"
repo="%s"
issue-term="pathname"
theme="github-light"
crossorigin="anonymous"
async>
</script>"""
% repo
)
@staticmethod
def _load_gitalk() -> Markup:
"""
Load Gitalk resources.
Reference: https://github.com/gitalk/gitalk#install
"""
client_id = current_app.config['COMMENT_GITALK_CLIENT_ID']
client_secret = current_app.config['COMMENT_GITALK_CLIENT_SECRET']
repo = current_app.config['COMMENT_GITALK_REPO']
owner = current_app.config['COMMENT_GITALK_OWNER']
admin = current_app.config['COMMENT_GITALK_ADMIN']
return Markup(
"""<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/gitalk@1/dist/gitalk.css">
<script src="https://cdn.jsdelivr.net/npm/gitalk@1/dist/gitalk.min.js"></script>
<div id="gitalk-container"></div>
<div id='vcomments'></div>
<script>
const gitalk = new Gitalk({{
clientID: '{}',
clientSecret: '{}',
repo: '{}', // The repository of store comments,
owner: '{}',
admin: ['{}'],
id: location.pathname, // Ensure uniqueness and length less than 50
distractionFreeMode: false // Facebook-like distraction free mode
}})
gitalk.render('gitalk-container')
</script>""".format(
client_id, client_secret, repo, owner, admin
)
) | PypiClean |
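# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal app wired to
# the Disqus backend. The short name is a placeholder value; the config key
# matches the one read in _load_disqus() above.
if __name__ == '__main__':
    demo_app = Flask(__name__)
    demo_app.config['COMMENT_DISQUS_SHORTNAME'] = 'my-disqus-shortname'  # hypothetical
    Comment(demo_app, platform=DISQUS)

    @demo_app.route('/post')
    def post():
        # templates can also call {{ comment.load() }} via the registered Jinja global
        return demo_app.extensions['comment'].load()

    demo_app.run(debug=True)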
/CAMELS_library-0.3.tar.gz/CAMELS_library-0.3/plots/different_seeds/plot_CV.py | from pylab import *
import numpy as np
from matplotlib.ticker import ScalarFormatter
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.ticker import AutoMinorLocator
from matplotlib.colors import LogNorm
from matplotlib.patches import Ellipse
rcParams["mathtext.fontset"]='cm'
################################## INPUT #######################################
root = '/mnt/ceph/users/camels/Results'
f_out = 'different_seed_CV.pdf'
# Pk m
f11 = '%s/Pk/IllustrisTNG/mean_CV_Pk_m_z=0.00.txt'%root
f12 = '%s/Pk/SIMBA/mean_CV_Pk_m_z=0.00.txt'%root
#f12 = '%s/Pk/SIMBA_OLD/mean_Pk_m_z=0.00.txt'%root
f13 = '%s/Pk/IllustrisTNG/extreme_0/Pk_m_z=0.00.txt'%root
f14 = '%s/Pk/IllustrisTNG/extremestellar_0/Pk_m_z=0.00.txt'%root
f15 = '%s/Pk/IllustrisTNG/noFB_0/Pk_m_z=0.00.txt'%root
# Pk g
f21 = '%s/Pk/IllustrisTNG/mean_CV_Pk_g_z=0.00.txt'%root
f22 = '%s/Pk/SIMBA/mean_CV_Pk_g_z=0.00.txt'%root
#f22 = '%s/Pk/SIMBA_OLD/mean_Pk_g_z=0.00.txt'%root
f23 = '%s/Pk/IllustrisTNG/extreme_0/Pk_g_z=0.00.txt'%root
f24 = '%s/Pk/IllustrisTNG/extremestellar_0/Pk_g_z=0.00.txt'%root
f25 = '%s/Pk/IllustrisTNG/noFB_0/Pk_g_z=0.00.txt'%root
# ratio Pk m
f31 = '%s/Pk_ratio/IllustrisTNG/mean_CV_Pk_ratio_m_z=0.00.txt'%root
f32 = '%s/Pk_ratio/SIMBA/mean_CV_Pk_ratio_m_z=0.00.txt'%root
#f32 = '%s/Pk_ratio/SIMBA_OLD/mean_Pk_ratio_m_z=0.00.txt'%root
#f33 = '%s/Pk/IllustrisTNG/extreme_0/Pk_g_z=0.00.txt'%root
#f34 = '%s/Pk/IllustrisTNG/extremestellar_0/Pk_g_z=0.00.txt'%root
#f35 = '%s/Pk/IllustrisTNG/noFB_0/Pk_g_z=0.00.txt'%root
# HMF
f41 = '%s/HMF/IllustrisTNG/mean_CV_mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f42 = '%s/HMF/SIMBA/mean_CV_mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
#f42 = '%s/HMF/SIMBA_OLD/mean_mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f43 = '%s/HMF/IllustrisTNG/extreme_0/mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f44 = '%s/HMF/IllustrisTNG/extremestellar_0/mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f45 = '%s/HMF/IllustrisTNG/noFB_0/mass_function_1.00e+10_1.00e+14_30_z=0.00.txt'%root
# SFRH
f51 = '%s/SFRH/IllustrisTNG/mean_CV_SFRH_0.00_10.00_10000.txt'%root
f52 = '%s/SFRH/SIMBA/mean_CV_SFRH_0.00_10.00_10000.txt'%root
#f52 = '%s/SFRH/SIMBA_OLD/mean_SFRH_0.00_10.00_10000.txt'%root
f53 = '%s/SFRH/IllustrisTNG/extreme_0/SFRH_0.00_10.00_10000.txt'%root
f54 = '%s/SFRH/IllustrisTNG/extremestellar_0/SFRH_0.00_10.00_10000.txt'%root
f55 = '%s/SFRH/IllustrisTNG/noFB_0/SFRH_0.00_10.00_10000.txt'%root
# SMF
f61 = '%s/SMF/IllustrisTNG/mean_CV_SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
f62 = '%s/SMF/SIMBA/mean_CV_SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
#f62 = '%s/SMF/SIMBA_OLD/mean_SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
f63 = '%s/SMF/IllustrisTNG/extreme_0/SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
f64 = '%s/SMF/IllustrisTNG/extremestellar_0/SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
f65 = '%s/SMF/IllustrisTNG/noFB_0/SMF_1.00e+09_5e+11_10_z=0.00.txt'%root
# baryon fraction
f71 = '%s/baryon_fraction/IllustrisTNG/mean_CV_bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f72 = '%s/baryon_fraction/SIMBA/mean_CV_bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
#f72 = '%s/baryon_fraction/SIMBA_OLD/mean_bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f73 = '%s/baryon_fraction/IllustrisTNG/extreme_0/bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f74 = '%s/baryon_fraction/IllustrisTNG/extremestellar_0/bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
f75 = '%s/baryon_fraction/IllustrisTNG/noFB_0/bf_1.00e+10_1.00e+14_30_z=0.00.txt'%root
# halos temperature
f81 = '%s/SO/IllustrisTNG/mean_T_CV_1.00e+12_1.00e+14_10_z=0.00.txt'%root
f82 = '%s/SO/SIMBA/mean_T_CV_1.00e+12_1.00e+14_10_z=0.00.txt'%root
#f82 = '%s/SO/SIMBA_OLD/mean_T_1.00e+12_1.00e+14_10_z=0.00.txt'%root
f83 = '%s/SO/IllustrisTNG/extreme_0/SO_z=0.00.txt'%root
f84 = '%s/SO/IllustrisTNG/extremestellar_0/SO_z=0.00.txt'%root
f85 = '%s/SO/IllustrisTNG/noFB_0/SO_z=0.00.txt'%root
# galaxies radius
f91 = '%s/Radii/IllustrisTNG/mean_R_vs_SM_CV_z=0.00.txt'%root
f92 = '%s/Radii/SIMBA/mean_R_vs_SM_CV_z=0.00.txt'%root
#f92 = '%s/Radii/SIMBA_OLD/mean_R_vs_SM_z=0.00.txt'%root
f93 = '%s/Radii/IllustrisTNG/extreme_0/R_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f94 = '%s/Radii/IllustrisTNG/extremestellar_0/R_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f95 = '%s/Radii/IllustrisTNG/noFB_0/R_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
# Black hole masses
f101 = '%s/BH/IllustrisTNG/mean_BH_vs_SM_CV_z=0.00.txt'%root
f102 = '%s/BH/SIMBA/mean_BH_vs_SM_CV_z=0.00.txt'%root
#f102 = '%s/BH/SIMBA_OLD/mean_BH_vs_SM_z=0.00.txt'%root
f103 = '%s/BH/IllustrisTNG/extreme_0/BH_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f104 = '%s/BH/IllustrisTNG/extremestellar_0/BH_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f105 = '%s/BH/IllustrisTNG/noFB_0/BH_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
# Vmax
f111 = '%s/Vmax/IllustrisTNG/mean_V_vs_SM_CV_z=0.00.txt'%root
f112 = '%s/Vmax/SIMBA/mean_V_vs_SM_CV_z=0.00.txt'%root
#f112 = '%s/Vmax/SIMBA_OLD/mean_V_vs_SM_z=0.00.txt'%root
f113 = '%s/Vmax/IllustrisTNG/extreme_0/Vmax_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f114 = '%s/Vmax/IllustrisTNG/extremestellar_0/Vmax_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f115 = '%s/Vmax/IllustrisTNG/noFB_0/Vmax_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
# SFR
f121 = '%s/SFR/IllustrisTNG/mean_SFR_vs_SM_CV_z=0.00.txt'%root
f122 = '%s/SFR/SIMBA/mean_SFR_vs_SM_CV_z=0.00.txt'%root
#f122 = '%s/SFR/SIMBA_OLD/mean_SFR_vs_SM_z=0.00.txt'%root
f123 = '%s/SFR/IllustrisTNG/extreme_0/SFR_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f124 = '%s/SFR/IllustrisTNG/extremestellar_0/SFR_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
f125 = '%s/SFR/IllustrisTNG/noFB_0/SFR_vs_SM_1.00e+09_5e+11_10_z=0.00.txt'%root
################################################################################
fig = figure(figsize=(20,22))
gs = gridspec.GridSpec(4,3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax3 = plt.subplot(gs[2])
ax4 = plt.subplot(gs[3])
ax5 = plt.subplot(gs[4])
ax6 = plt.subplot(gs[5])
ax7 = plt.subplot(gs[6])
ax8 = plt.subplot(gs[7])
ax9 = plt.subplot(gs[8])
ax10 = plt.subplot(gs[9])
ax11 = plt.subplot(gs[10])
ax12 = plt.subplot(gs[11])
subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.2)
###### Pk m plot ######
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlim([0.3,35])
ax1.set_ylim([1e-1,1e3])
ax1.set_xlabel(r'$k\,[h{\rm Mpc}^{-1}]$',fontsize=18)
ax1.set_ylabel(r'$P_{\rm m}(k)\,[h^{-3}{\rm Mpc}^3]$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f11, unpack=True)
ax1.fill_between(X, dYp, dYm, color='blue')
p1,=ax1.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f12, unpack=True)
ax1.fill_between(X, dYp, dYm, color='red', alpha=0.6)
p2,=ax1.plot(X,Ym,c='k',linestyle='--')
X,Y = np.loadtxt(f13, unpack=True)
#p3,=ax1.plot(X,Y,c='g',linestyle='-')
X,Y = np.loadtxt(f14, unpack=True)
#p3,=ax1.plot(X,Y,c='g',linestyle='--')
X,Y = np.loadtxt(f15, unpack=True)
#p3,=ax1.plot(X,Y,c='g',linestyle='dotted')
#######################
###### Pk g plot ######
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.set_xlim([0.3,35])
ax2.set_ylim([1e-2,1e3])
ax2.set_xlabel(r'$k\,[h{\rm Mpc}^{-1}]$',fontsize=18)
ax2.set_ylabel(r'$P_{\rm g}(k)\,[h^{-3}{\rm Mpc}^3]$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f21, unpack=True)
ax2.fill_between(X, dYp, dYm, color='blue')
ax2.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f22, unpack=True)
ax2.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax2.plot(X,Ym,c='k',linestyle='--')
X,Y = np.loadtxt(f23, unpack=True)
#p3,=ax2.plot(X,Y,c='g',linestyle='-')
X,Y = np.loadtxt(f24, unpack=True)
#p3,=ax2.plot(X,Y,c='g',linestyle='--')
X,Y = np.loadtxt(f25, unpack=True)
#p3,=ax2.plot(X,Y,c='g',linestyle='dotted')
#######################
###### ratio Pk m plot ######
ax3.set_xscale('log')
ax3.set_xlim([0.3,35])
ax3.set_ylim([0.5,1.05])
ax3.set_xlabel(r'$k\,[h{\rm Mpc}^{-1}]$',fontsize=18)
ax3.set_ylabel(r'$P_{\rm hydro}(k)/P_{\rm Nbody}(k)$',fontsize=18)
X,Y,dY,dYp,dYm,Ym= np.loadtxt(f31, unpack=True)
ax3.fill_between(X, dYp, dYm, color='blue')
ax3.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f32, unpack=True)
ax3.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax3.plot(X,Ym,c='k',linestyle='--')
#######################
######### HMF #########
ax4.set_xscale('log')
ax4.set_yscale('log')
ax4.set_xlabel(r'$M_{\rm halo}/\Omega_{\rm m}\,[h^{-1}M_\odot]$',fontsize=18)
ax4.set_ylabel(r'${\rm HMF}\,[h^{4}{\rm Mpc}^{-3}M_\odot^{-1}]$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f41, unpack=True)
ax4.fill_between(X, dYp, dYm, color='blue')
ax4.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f42, unpack=True)
ax4.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax4.plot(X,Ym,c='k',linestyle='--')
dumb,X,Y = np.loadtxt(f43, unpack=True)
#p3,=ax4.plot(X,Y,c='g',linestyle='-')
dumb,X,Y = np.loadtxt(f44, unpack=True)
#p3,=ax4.plot(X,Y,c='g',linestyle='--')
dumb,X,Y = np.loadtxt(f45, unpack=True)
#p3,=ax4.plot(X,Y,c='g',linestyle='dotted')
#######################
###### SFRH ######
ax5.set_yscale('log')
ax5.set_xlim([0.0,7.0])
ax5.set_ylim([5e-4,0.2])
ax5.set_xlabel(r'$z$',fontsize=18)
ax5.set_ylabel(r'${\rm SFRH}\,[M_\odot{\rm yr}^{-1}{\rm Mpc}^{-3}]$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f51, unpack=True)
ax5.fill_between(X, dYp, dYm, color='blue')
ax5.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f52, unpack=True)
ax5.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax5.plot(X,Ym,c='k',linestyle='--')
X,Y = np.loadtxt(f53, unpack=True)
#p3,=ax5.plot(X,Y,c='g',linestyle='-')
X,Y = np.loadtxt(f54, unpack=True)
#p3,=ax5.plot(X,Y,c='g',linestyle='--')
X,Y = np.loadtxt(f55, unpack=True)
#p3,=ax5.plot(X,Y,c='g',linestyle='dotted')
#######################
###### SMF ######
ax6.set_xscale('log')
ax6.set_yscale('log')
ax6.set_xlabel(r'$M_*\,[h^{-1}M_\odot]$',fontsize=18)
ax6.set_ylabel(r'${\rm SMF}\,[h^4{\rm Mpc}^{-3}M_\odot^{-1}]$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f61, unpack=True)
ax6.fill_between(X, dYp, dYm, color='blue')
ax6.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f62, unpack=True)
ax6.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax6.plot(X,Ym,c='k',linestyle='--')
X,Y = np.loadtxt(f63, unpack=True)
#p3,=ax6.plot(X,Y,c='g',linestyle='-')
X,Y = np.loadtxt(f64, unpack=True)
#p3,=ax6.plot(X,Y,c='g',linestyle='--')
X,Y = np.loadtxt(f65, unpack=True)
#p3,=ax6.plot(X,Y,c='g',linestyle='dotted')
#######################
###### baryon fraction ######
ax7.set_xscale('log')
ax7.set_xlabel(r'$M_{\rm halo}/\Omega_{\rm m}\,[h^{-1}M_\odot]$',fontsize=18)
ax7.set_ylabel(r'$M_{\rm b}/M_{\rm halo}/(\Omega_{\rm b}/\Omega_{\rm m})$',fontsize=18)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f71, unpack=True)
ax7.fill_between(X, dYp, dYm, color='blue')
ax7.plot(X,Ym,c='k',linestyle='-')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f72, unpack=True)
ax7.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax7.plot(X,Ym,c='k',linestyle='--')
X,Y = np.loadtxt(f73, unpack=True)
#p3,=ax7.plot(X,Y,c='g',linestyle='-')
X,Y = np.loadtxt(f74, unpack=True)
#p3,=ax7.plot(X,Y,c='g',linestyle='--')
X,Y = np.loadtxt(f75, unpack=True)
#p3,=ax7.plot(X,Y,c='g',linestyle='dotted')
#######################
######## halos temperature #######
ax8.set_xscale('log')
ax8.set_yscale('log')
ax8.set_xlabel(r'$M_{\rm halo}\,[h^{-1}M_\odot]$',fontsize=18)
ax8.set_ylabel(r'$T_{\rm halo}\,[K]$',fontsize=18)
#X,Y,dY,N = np.loadtxt(f81, unpack=True)
#ax8.fill_between(X, Y+dY, Y-dY, color='blue')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f81, unpack=True)
ax8.fill_between(X, dYp, dYm, color='blue')
ax8.plot(X,Ym,c='k',linestyle='-')
#X,Y,dY,N = np.loadtxt(f82, unpack=True)
#ax8.fill_between(X, Y+dY, Y-dY, color='red', alpha=0.6)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f82, unpack=True)
ax8.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax8.plot(X,Ym,c='k',linestyle='--')
#data = np.loadtxt(f83)
#ax8.plot(data[:,0], data[:,9],c='k',linestyle='--')
#data = np.loadtxt(f84)
#ax8.plot(data[:,0], data[:,9],c='k',linestyle='--')
#data = np.loadtxt(f85)
#ax8.plot(data[:,0], data[:,9],c='k',linestyle='--')
##################################
############# Radii ##############
ax9.set_xscale('log')
ax9.set_yscale('log')
ax9.set_xlabel(r'$M_*\,[h^{-1}M_\odot]$',fontsize=18)
ax9.set_ylabel(r'$R_{1/2}\,[h^{-1}{\rm kpc}]$',fontsize=18)
#X,Y,dY,N = np.loadtxt(f91, unpack=True)
#ax9.fill_between(X, Y+dY, Y-dY, color='blue')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f91, unpack=True)
ax9.fill_between(X, dYp, dYm, color='blue')
ax9.plot(X,Ym,c='k',linestyle='-')
#X,Y,dY,N = np.loadtxt(f92, unpack=True)
#ax9.fill_between(X, Y+dY, Y-dY, color='red', alpha=0.6)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f92, unpack=True)
ax9.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax9.plot(X,Ym,c='k',linestyle='--')
X,Y,dY = np.loadtxt(f93, unpack=True)
#p3,=ax9.plot(X,Y,c='g',linestyle='-')
X,Y,dY = np.loadtxt(f94, unpack=True)
#p3,=ax9.plot(X,Y,c='g',linestyle='--')
X,Y,dY = np.loadtxt(f95, unpack=True)
#p3,=ax9.plot(X,Y,c='g',linestyle='dotted')
##################################
############# BH masses ##############
ax10.set_xscale('log')
ax10.set_yscale('log')
ax10.set_xlabel(r'$M_*\,[h^{-1}M_\odot]$',fontsize=18)
ax10.set_ylabel(r'$M_{\rm black-holes}\,[h^{-1}M_\odot]$',fontsize=18)
#X,Y,dY,N = np.loadtxt(f101, unpack=True)
#ax10.fill_between(X, Y+dY, Y-dY, color='blue')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f101, unpack=True)
ax10.fill_between(X, dYp, dYm, color='blue')
ax10.plot(X,Ym,c='k',linestyle='-')
#X,Y,dY,N = np.loadtxt(f102, unpack=True)
#ax10.fill_between(X, Y+dY, Y-dY, color='red', alpha=0.6)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f102, unpack=True)
ax10.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax10.plot(X,Ym,c='k',linestyle='--')
X,Y,dY = np.loadtxt(f103, unpack=True)
#p3,=ax10.plot(X,Y,c='g',linestyle='-')
X,Y,dY = np.loadtxt(f104, unpack=True)
#p3,=ax10.plot(X,Y,c='g',linestyle='--')
X,Y,dY = np.loadtxt(f105, unpack=True)
#p3,=ax10.plot(X,Y,c='g',linestyle='dotted')
##################################
############# Vmax ##############
ax11.set_xscale('log')
ax11.set_yscale('log')
ax11.set_xlabel(r'$M_*\,[h^{-1}M_\odot]$',fontsize=18)
ax11.set_ylabel(r'$V_{\rm max}\,[{\rm km/s}]$',fontsize=18)
#X,Y,dY,N = np.loadtxt(f111, unpack=True)
#ax11.fill_between(X, Y+dY, Y-dY, color='blue')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f111, unpack=True)
ax11.fill_between(X, dYp, dYm, color='blue')
ax11.plot(X,Ym,c='k',linestyle='-')
#X,Y,dY,N = np.loadtxt(f112, unpack=True)
#ax11.fill_between(X, Y+dY, Y-dY, color='red', alpha=0.6)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f112, unpack=True)
ax11.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax11.plot(X,Ym,c='k',linestyle='--')
X,Y,dY = np.loadtxt(f113, unpack=True)
#p3,=ax11.plot(X,Y,c='g',linestyle='-')
X,Y,dY = np.loadtxt(f114, unpack=True)
#p3,=ax11.plot(X,Y,c='g',linestyle='--')
X,Y,dY = np.loadtxt(f115, unpack=True)
#p3,=ax11.plot(X,Y,c='g',linestyle='dotted')
##################################
############# SFR ##############
ax12.set_xscale('log')
ax12.set_yscale('log')
ax12.set_xlabel(r'$M_*\,[h^{-1}M_\odot]$',fontsize=18)
ax12.set_ylabel(r'${\rm SFR}\,[M_\odot{\rm yr}^{-1}]$',fontsize=18)
#X,Y,dY,N = np.loadtxt(f121, unpack=True)
#ax12.fill_between(X, Y+dY, Y-dY, color='blue')
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f121, unpack=True)
ax12.fill_between(X, dYp, dYm, color='blue')
ax12.plot(X,Ym,c='k',linestyle='-')
#X,Y,dY,N = np.loadtxt(f122, unpack=True)
#ax12.fill_between(X, Y+dY, Y-dY, color='red', alpha=0.6)
X,Y,dY,dYp,dYm,Ym = np.loadtxt(f122, unpack=True)
ax12.fill_between(X, dYp, dYm, color='red', alpha=0.6)
ax12.plot(X,Ym,c='k',linestyle='--')
X,Y,dY = np.loadtxt(f123, unpack=True)
#p3,=ax12.plot(X,Y,c='g',linestyle='-')
X,Y,dY = np.loadtxt(f124, unpack=True)
#p3,=ax12.plot(X,Y,c='g',linestyle='--')
X,Y,dY = np.loadtxt(f125, unpack=True)
#p3,=ax12.plot(X,Y,c='g',linestyle='dotted')
##################################
#legend
ax1.legend([p1,p2],
[r"${\rm IllustrisTNG}$",
r"${\rm SIMBA}$"],
loc=0,prop={'size':18},ncol=1,frameon=True)
#ax1.set_title(r'$\sum m_\nu=0.0\/{\rm eV}$',position=(0.5,1.02),size=18)
#title('About as simple as it gets, folks')
#suptitle('About as simple as it gets, folks') #for title with several panels
#grid(True)
#show()
savefig(f_out, bbox_inches='tight')
close(fig) | PypiClean |
/MACS-1.4.3.tar.gz/MACS-1.4.3/lib/IO/bedGraphIO.py | # ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import re
import shutil
from MACS1.IO.FeatIO import bedGraphTrackI
from MACS1.IO.BinKeeper import BinKeeperII
import time
# ------------------------------------
# constants
# ------------------------------------
# ------------------------------------
# Misc functions
# ------------------------------------
# ------------------------------------
# Classes
# ------------------------------------
class bedGraphIO:
"""File Parser Class for bedGraph File.
"""
def __init__ (self,f):
"""f must be a filename or a file handler.
"""
if type(f) == str:
self.fhd = open(f,"r")
elif type(f) == file:
self.fhd = f
else:
raise Exception("f must be a filename or a file handler.")
def build_bdgtrack (self):
"""Use this function to return a bedGraphTrackI object.
"""
data = bedGraphTrackI()
add_func = data.add_loc
for i in self.fhd:
if i.startswith("track"):
continue
elif i.startswith("#"):
continue
elif i.startswith("browse"):
continue
else:
(chrom,startpos,endpos,value)=i.split()
add_func(chrom,int(startpos),int(endpos),float(value))
self.fhd.seek(0)
return data
def build_binKeeper (self,chromLenDict={},binsize=200):
"""Use this function to return a dictionary of BinKeeperII
objects.
        chromLenDict is a dictionary of chromosome lengths like
        {'chr1':100000,'chr2':200000}
        binsize is in bps. For details, check BinKeeper.
"""
data = {}
for i in self.fhd:
if i.startswith("track"):
continue
elif i.startswith("#"):
continue
elif i.startswith("browse"):
continue
else:
(chrom,startpos,endpos,value)=i.split()
if not data.has_key(chrom):
chrlength = chromLenDict.setdefault(chrom,250000000) + 10000000
data.setdefault(chrom,BinKeeperII(binsize=binsize,chromosomesize=chrlength))
data[chrom].add(int(startpos),int(endpos),float(value))
self.fhd.seek(0)
return data | PypiClean |
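# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the bedGraph file name
# and chromosome lengths are assumptions for illustration. Like the rest of
# MACS 1.4, this targets Python 2.
if __name__ == "__main__":
    bio = bedGraphIO("treatment.bdg")
    bdgtrack = bio.build_bdgtrack()               # bedGraphTrackI object
    binkeepers = bio.build_binKeeper(chromLenDict={"chr1": 249250621,
                                                   "chr2": 243199373},
                                     binsize=200) # dict of BinKeeperII per chromosome
    print binkeepers.keys()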
/GaiaXPy-2.1.0.tar.gz/GaiaXPy-2.1.0/gaiaxpy/generator/synthetic_photometry_generator.py | from configparser import ConfigParser
from gaiaxpy.config.paths import config_ini_file
from gaiaxpy.core.satellite import BANDS
from gaiaxpy.spectrum.sampled_basis_functions import SampledBasisFunctions
from gaiaxpy.spectrum.single_synthetic_photometry import SingleSyntheticPhotometry
from gaiaxpy.spectrum.utils import get_covariance_matrix
from gaiaxpy.spectrum.xp_continuous_spectrum import XpContinuousSpectrum
config_parser = ConfigParser()
config_parser.read(config_ini_file)
class SyntheticPhotometryGenerator(object):
def generate(self, parsed_input_data, extension, output_file, output_format, save_file):
raise ValueError('Method not defined for base class.')
def _get_sampled_basis_functions(self, xp_sampling, xp_sampling_grid):
return {band: SampledBasisFunctions.from_design_matrix(xp_sampling_grid, xp_sampling[band]) for band in BANDS}
def _create_photometry_list(self, parsed_input_data, photometric_system, sampled_basis_func, xp_merge):
parsed_input_data_dict = parsed_input_data.to_dict('records')
return (_generate_synthetic_photometry(row, sampled_basis_func, xp_merge, photometric_system) for row in
parsed_input_data_dict)
def _generate_synthetic_photometry(row, design_matrix, merge, photometric_system):
"""
Create the synthetic photometry from the input continuously-represented mean spectrum and design matrix.
Args:
row (DataFrame): Single row in a DataFrame containing the entry for one source in the mean spectra file. This
will include columns for both bands (although one could be missing).
design_matrix (ndarray): 2D array containing the basis functions sampled for the specific photometric system.
merge (dict): Dictionary containing an array of weights per BP and one for RP. These have one value per sample
and define the contributions from BP and RP to the joined absolute spectrum.
photometric_system (obj): Photometric system object containing the zero-points.
Returns:
SingleSyntheticPhotometry: The output synthetic photometry.
"""
cont_dict = {band: XpContinuousSpectrum(row['source_id'], band.upper(), row[f'{band}_coefficients'],
get_covariance_matrix(row, band), row[f'{band}_standard_deviation'])
for band in BANDS}
return SingleSyntheticPhotometry(row['source_id'], cont_dict, design_matrix, merge, photometric_system) | PypiClean |
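# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): it only strings
# together the helpers defined above. The sampling matrices, grid, merge
# weights, photometric system object and DataFrame column layout are assumed
# to come from the rest of GaiaXPy's generator machinery.
def _sketch_generate_photometry(parsed_input_data, xp_sampling, xp_sampling_grid, xp_merge,
                                photometric_system):
    generator = SyntheticPhotometryGenerator()
    sampled_basis_func = generator._get_sampled_basis_functions(xp_sampling, xp_sampling_grid)
    # _create_photometry_list yields one SingleSyntheticPhotometry per input source
    return list(generator._create_photometry_list(parsed_input_data, photometric_system,
                                                  sampled_basis_func, xp_merge))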
/IRCLogParser-1.0.6.tar.gz/IRCLogParser-1.0.6/lib/vis.py | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
import config
import util
import igraph
from random import randint
import math
import matplotlib.pyplot as plt
import os
import in_out.saver as saver
from numpy.random import normal
from scipy.optimize import curve_fit
from scipy import stats
import plotly.plotly as py
py.sign_in('rohangoel963', 'vh6le8no26')
import plotly.graph_objs as go
from numpy import genfromtxt
import glob
def generate_probability_distribution(data, initial_rows_filter):
"""
    Normalises the y coordinates, dividing each by the sum of all y-coordinate entries
Args:
data(list of list): list of list representation csv data (with 2 coordinates)
initial_rows_filter(int): analysis on first how many rows
Returns:
x-coordinate (list)
freq (list) normalised-y-coordinates
"""
topRows = [int(x[1]) for x in data[:initial_rows_filter]]
total = sum(topRows)
freq = [x/float(total) for x in topRows]
return range(0, initial_rows_filter), freq
# FOR CL and RT anaylysis
def exponential_curve_fit_and_plot(data, initial_rows_filter, output_directory, output_file_name):
"""
    Fit to an exponential curve and draw the x-y data after filtering to the initial initial_rows_filter rows
Args:
data(list of list): list of list representation csv data (with 2 coordinates)
initial_rows_filter(int): analysis on first how many rows
        output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
a (int) : curve fit variable for the equation a * np.exp(-b * x) + c
b (int) : curve fit variable for the equation a * np.exp(-b * x) + c
c (int) : curve fit variable for the equation a * np.exp(-b * x) + c
mse (int) : Mean Squared error from the fit
"""
x_pdf, y_pdf = generate_probability_distribution(data, initial_rows_filter)
x = np.array(x_pdf)
y = np.array(y_pdf)
popt, pcov = curve_fit(util.exponential_curve_func, x, y)
[a, b, c] = popt
mse = mean_squared_error(util.exponential_curve_func(x, *popt), y)
if config.DEBUGGER:
print "CURVE FIT", output_file_name, "|", a, b, c, "MSE =", mse
plt.figure()
plt.plot(x, y, 'b-', label="Data")
plt.plot(x, util.exponential_curve_func(x, *popt), 'r-', label="Fitted Curve")
axes = plt.gca()
axes.set_xlim([0, 20])
axes.set_ylim([0, 1])
plt.legend()
# plt.show()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
return [a, b, c, mse]
# Ignoring Initial Zeros in CRT
def exponential_curve_fit_and_plot_x_shifted(data, initial_rows_filter, output_directory, output_file_name):
"""
    Fit to an exponential curve and draw the x-y data after filtering to the initial initial_rows_filter rows.
    Also ignores the input until the first non-zero y-coordinate and shifts the graph along
    the x axis by that first non-zero index.
Args:
data(list of list): list of list representation csv data (with 2 coordinates)
initial_rows_filter(int): analysis on first how many rows
        output_directory(str): location to save the graph
output_file_name(str): name of the image file to be saved
Returns:
a (int) : curve fit variable for the equation a * np.exp(-b * x) + c
b (int) : curve fit variable for the equation a * np.exp(-b * x) + c
c (int) : curve fit variable for the equation a * np.exp(-b * x) + c
        first_non_zero_index (int): amount by which the graph is shifted along the x axis
mse (int) : Mean Squared error from the fit
"""
x_pdf, y_pdf = generate_probability_distribution(data, initial_rows_filter)
first_non_zero_index = -1
if filter(lambda x: x != 0, y_pdf):
first_non_zero_index = y_pdf.index(filter(lambda x: x != 0, y_pdf)[0])
x = np.array(x_pdf[0: initial_rows_filter - first_non_zero_index])
y = np.array(y_pdf[first_non_zero_index:])
popt, pcov = curve_fit(util.exponential_curve_func, x, y)
[a, b, c] = popt
mse = mean_squared_error(util.exponential_curve_func(x, *popt), y)
if config.DEBUGGER:
print "CURVE FIT", output_file_name, "|", a, b, c, "x-shift =", first_non_zero_index, "MSE =", mse
plt.figure()
plt.plot(x, y, 'b-', label="Data")
plt.plot(x, util.exponential_curve_func(x, *popt), 'r-', label="Fitted Curve")
axes = plt.gca()
# axes.set_xlim([0 ,20])
axes.set_ylim([0, 1])
plt.xticks(range(0, 20, 5), xrange(first_non_zero_index, initial_rows_filter, 5), size='small')
plt.legend()
# plt.show()
saver.check_if_dir_exists(output_directory)
plt.savefig(output_directory + "/" + output_file_name + ".png")
plt.close()
return [a, b, c, mse, first_non_zero_index]
def plot_infomap_igraph(nx_graph, membership, output_directory, output_file_name, vertex_label_text=False, show_edges=True):
"""
Plots the infomap community structure generated by igraph
Args:
nx_graph(object): igraph graph object (the parameter name is historical; igraph methods are used on it)
membership(list): membership generated by infomap.community_infomap
output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
vertex_label_text(bool): toggle between label text and index
show_edges(bool): toggle to disable/enable edges during visualisation
Returns:
null
"""
if membership is not None:
graph_copy = nx_graph.copy()
edges = []
edges_colors = []
for edge in nx_graph.es():
if membership[edge.tuple[0]] != membership[edge.tuple[1]]:
edges.append(edge)
# edges_colors.append("#55555520")
edges_colors.append("#00000000")
else:
edges_colors.append("#00000099")
graph_copy.delete_edges(edges)
layout = graph_copy.layout("kk")
nx_graph.es["color"] = edges_colors
else:
layout = nx_graph.layout("kk")
nx_graph.es["color"] = "gray"
visual_style = {}
visual_style["vertex_label_dist"] = 0
visual_style["vertex_label_size"] = 18
if show_edges:
visual_style["edge_color"] = nx_graph.es["color"]
else:
visual_style["edge_color"] = "#00000000"
visual_style["vertex_size"] = 32
visual_style["layout"] = layout
visual_style["bbox"] = (1024, 768)
visual_style["margin"] = 40
visual_style["edge_label"] = nx_graph.es["weight"]
visual_style["edge_width"] = igraph.rescale(nx_graph.es['weight'], out_range=(1, 10))
for vertex in nx_graph.vs():
if vertex_label_text:
vertex["label"] = vertex["id"]
else:
vertex["label"] = vertex.index
if membership is not None:
colors = []
for i in range(0, max(membership)+1):
colors.append('%06X' % randint(0, 0xFFFFFF))
for vertex in nx_graph.vs():
vertex["vertex_shape"] = "circle"
vertex["color"] = str('#') + colors[membership[vertex.index]]
# coloring for channels vs users
# vertex["vertex_shape"] = "square" if int(vertex["id"]) >= 1000000 else "circle"
# vertex["color"] = "red" if int(vertex["id"]) >= 1000000 else "#00ff00"
visual_style["vertex_color"] = nx_graph.vs["color"]
visual_style["vertex_shape"] = nx_graph.vs["vertex_shape"]
saver.check_if_dir_exists(output_directory)
igraph.plot(nx_graph, (output_directory + "/" + output_file_name + ".png"), **visual_style)
if config.DEBUGGER:
print "INFOMAPS visualisation for", output_file_name, "completed"
def generate_log_plots(filter_val, plot_data, output_directory, output_file_name):
"""
Generate log-log plots for the given time frame, selecting the first filter_val
elements and plotting the log of each value on the y axis.
Args:
filter_val (int): number of values to be used from data for plotting
plot_data (list of list): data to be plotted
output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
sum_each_row = []
for row in plot_data[2:]: #ignore degree 0 and text, starting from degree 1
sum_each_row.append(sum(row[1:]))
# print sum_each_row
x_axis_log = [math.log(i) for i in xrange(1, filter_val)] # ignore degree 0
y_axis_log = [math.log(i) if i > 0 else 0 for i in sum_each_row[1:filter_val]] # ignore degree 0
calc_plot_linear_fit(x_axis_log, y_axis_log, output_directory, output_file_name)
def calc_plot_linear_fit(x_in, y_in, output_directory, output_file_name):
"""
Calculate and plot a linear fit for the data
Args:
x_in (list of float): x_axis data
y_in (list of float): y_axis data
output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
# get x and y vectors
x = np.array(x_in)
y = np.array(y_in)
slope, intercept, r_value, p_value, std_err = stats.linregress(x_in, y_in)
line = [slope*xi+intercept for xi in x_in]
print str(slope)+"\t"+str(intercept)+"\t"+str(r_value**2)+"\t"+str(mean_squared_error(y, line))
saver.check_if_dir_exists(output_directory)
if config.USE_PYPLOT:
def trace_helper_pyplot(x, y, label, color):
return go.Scatter(
x=x,
y=y,
mode='lines',
marker=go.Marker(color=color),
name=label
)
trace1 = trace_helper_pyplot(x, y, 'Data', 'rgb(255, 127, 14)')
trace2 = trace_helper_pyplot(x, line, 'Fit', 'rgb(31, 119, 180)')
layout = go.Layout(
title='DegreeNode',
xaxis=go.XAxis(zerolinecolor='rgb(255,255,255)', gridcolor='rgb(255,255,255)'),
# yaxis=go.YAxis(zerolinecolor='rgb(255,255,255)', gridcolor='rgb(255,255,255)')
)
data = [trace1, trace2]
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, output_directory+"/"+output_file_name + ".png")
else:
# graph config
axes = plt.gca()
axes.set_xlim([0, 3])
axes.set_ylim([0, 6])
plt.xlabel("log(degree)")
plt.ylabel("log(no_of_nodes)")
# fit with np.polyfit
m, b = np.polyfit(x, y, 1)
plt.plot(x, y, '-')
plt.plot(x, m*x + b, '-')
plt.legend(['Data', 'Fit'], loc='upper right')
plt.savefig(output_directory+"/" + output_file_name+".png")
plt.close()
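# Interpretation note: since both axes are log-transformed, the fitted line
# log(y) = m*log(x) + b corresponds to the power law y = exp(b) * x**m, so the
# slope m reported above estimates the exponent of the degree distribution.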
def generate_group_bar_charts(y_values, x_values, trace_header, output_directory, output_file_name):
"""
Plots multiple bar graphs on same graph
example usage:
generate_group_bar_charts([
[5.10114882, 5.0194652482, 4.9908093076],
[4.5824497358, 4.7083614037, 4.3812775722],
[2.6839471308, 3.0441476209, 3.6403820447]
], ['#kubuntu-devel', '#ubuntu-devel', '#kubuntu'],
['head1', 'head2', 'head3'], '/home/rohan/Desktop/', 'multi_box'
)
Args:
y_values (list of list): y_axis data, one list per trace
x_values (list): x_axis labels shared by all traces
trace_header (list of str): legend name for each trace
output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
data = [
go.Bar(
x=x_values,
y=y_values[i],
name=trace_header[i]
) for i in range(len(y_values))
]
layout = go.Layout(
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, output_directory + "/" + output_file_name+".png")
def csv_heatmap_generator_plotly(in_directory, output_directory, output_file_name):
"""
Plots heatmaps for all the csv files in the given directory
Args:
in_directory (str): location of input csv files; heatmaps are saved alongside them
output_directory(str): location to save graph (currently unused)
output_file_name(str): name of the image file to be saved (currently unused)
Returns:
null
"""
file_list = glob.glob(in_directory+"*.csv")
for file in file_list:
csv_data = genfromtxt(file, delimiter=',')
trace = go.Heatmap(
z=csv_data,
x=list(range(48)),
y=list(range(1, 12)),
colorscale=[
[0, 'rgb(255, 255, 204)'],
[0.13, 'rgb(255, 237, 160)'],
[0.25, 'rgb(254, 217, 118)'],
[0.38, 'rgb(254, 178, 76)'],
[0.5, 'rgb(253, 141, 60)'],
[0.63, 'rgb(252, 78, 42)'],
[0.75, 'rgb(227, 26, 28)'],
[0.88, 'rgb(189, 0, 38)'],
[1.0, 'rgb(128, 0, 38)']
]
)
data = [trace]
layout = go.Layout(title='HeatMap', width=800, height=640)
fig = go.Figure(data=data, layout=layout)
py.image.save_as(fig, filename=in_directory+file[file.rfind("/")+1:-4]+'_heatmap.png')
def matplotlob_csv_heatmap_generator(csv_file, output_directory, output_file_name):
"""
Plots a heatmap for the given csv file
Can be used as a script for generating heatmaps, faster alternative to plotly
Args:
csv_file (str): path of the input csv file
output_directory(str): location to save graph
output_file_name(str): name of the image file to be saved
Returns:
null
"""
column_labels = map(str, range(1, 32))
row_labels = map(str, range(1, 49))
data = genfromtxt(csv_file, delimiter=',')
print(data)
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.pcolor(data, cmap=plt.cm.Reds)
cbar = plt.colorbar(heatmap)
def np_arrange_helper(data, disp):
return np.arange(data) + disp
# put the major ticks at the middle of each cell
ax.set_xticks(np_arrange_helper(data.shape[0], 0.5), minor=False)
ax.set_yticks(np_arrange_helper(data.shape[1], 0.5), minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False)
ax.set_yticklabels(column_labels, minor=False)
plt.savefig(output_directory+"/" + output_file_name+".png")
plt.close()
/AMFM_decompy-1.0.11.tar.gz/AMFM_decompy-1.0.11/amfm_decompy/pYAAPT.py | import numpy as np
import numpy.lib.stride_tricks as stride_tricks
from scipy.signal import firwin, medfilt, lfilter
from scipy.signal.windows import hann, kaiser
import scipy.interpolate as scipy_interp
import amfm_decompy.basic_tools as basic
"""
--------------------------------------------
Classes.
--------------------------------------------
"""
"""
Auxiliary class to handle the class properties.
"""
class ClassProperty(object):
def __init__(self, initval=None):
self.val = initval
def __get__(self, obj, objtype):
return self.val
def __set__(self, obj, val):
self.val = val
"""
Creates a pitch object.
"""
class PitchObj(object):
PITCH_HALF = ClassProperty(0)
PITCH_HALF_SENS = ClassProperty(2.9)
PITCH_DOUBLE = ClassProperty(0)
PITCH_DOUBLE_SENS = ClassProperty(2.9)
SMOOTH_FACTOR = ClassProperty(5)
SMOOTH = ClassProperty(5)
PTCH_TYP = ClassProperty(100.0)
def __init__(self, frame_size, frame_jump, nfft=8192):
self.nfft = nfft
self.frame_size = frame_size
self.frame_jump = frame_jump
self.noverlap = self.frame_size-self.frame_jump
def set_energy(self, energy, threshold):
self.mean_energy = np.mean(energy)
self.energy = energy/self.mean_energy
self.vuv = (self.energy > threshold)
def set_frames_pos(self, frames_pos):
self.frames_pos = frames_pos
self.nframes = len(self.frames_pos)
def set_values(self, samp_values, file_size, interp_tech='pchip'):
self.samp_values = samp_values
self.fix()
self.values = self.upsample(self.samp_values, file_size, 0, 0,
interp_tech)
self.edges = self.edges_finder(self.values)
self.interpolate()
self.values_interp = self.upsample(self.samp_interp, file_size,
self.samp_interp[0],
self.samp_interp[-1], interp_tech)
"""
For the voiced/unvoiced version of the pitch data, finds the n samples where
the transitions between these two states occur.
"""
def edges_finder(self, values):
vec1 = (np.abs(values[1:]+values[:-1]) > 0)
vec2 = (np.abs(values[1:]*values[:-1]) == 0)
edges = np.logical_and(vec1, vec2)
# The previous logical operation detects where voiced/unvoiced transitions
# occur. Thus, a 'True' in the edges[n] sample indicates that the sample
# value[n+1] has a different state than value[n](i.e. if values[n] is
# voiced, then values[n+1] is unvoiced - and vice-versa). Consequently,
# the last sample from edges array will always be 'False' and is not
# calculated (because "there is no n+1 sample" for it. That's why
# len(edges) = len(values)-1). However, just for the sake of comprehension
# (and also to avoid python warnings about array length mismatches), I
# add a 'False' to the edges array. But in practice, this 'False' is
# useless.
edges = np.append(edges,[False])
index = np.arange(len(values))
index = index[edges > 0]
return index.tolist()
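# Worked example (hypothetical values): for values = [0, 0, 120, 123, 0] the
# method returns [1, 3], i.e. the unvoiced->voiced transition happens between
# samples 1 and 2 and the voiced->unvoiced transition between samples 3 and 4.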
"""
This method corresponds to the first half of the ptch_fix.m file. It tries
to fix half pitch and double pitch errors.
"""
def fix(self):
if self.PITCH_HALF > 0:
nz_pitch = self.samp_values[self.samp_values > 0]
idx = self.samp_values < (np.mean(nz_pitch)-self.PITCH_HALF_SENS *
np.std(nz_pitch))
if self.PITCH_HALF == 1:
self.samp_values[idx] = 0
elif self.PITCH_HALF == 2:
self.samp_values[idx] = 2*self.samp_values[idx]
if self.PITCH_DOUBLE > 0:
nz_pitch = self.samp_values[self.samp_values > 0]
idx = self.samp_values > (np.mean(nz_pitch)+self.PITCH_DOUBLE_SENS *
np.std(nz_pitch))
if self.PITCH_DOUBLE == 1:
self.samp_values[idx] = 0
elif self.PITCH_DOUBLE == 2:
self.samp_values[idx] = 0.5*self.samp_values[idx]
"""
Corresponds to the second half of the ptch_fix.m file. Creates the
interpolated pitch data.
"""
def interpolate(self):
pitch = np.zeros((self.nframes))
pitch[:] = self.samp_values
pitch2 = medfilt(self.samp_values, self.SMOOTH_FACTOR)
# This part in the original code is kind of confused and caused
# some problems with the extrapolated points before the first
# voiced frame and after the last voiced frame. So, I made some
# small modifications in order to make it work better.
edges = self.edges_finder(pitch)
first_sample = pitch[0]
last_sample = pitch[-1]
if len(np.nonzero(pitch2)[0]) < 2:
pitch[pitch == 0] = self.PTCH_TYP
else:
nz_pitch = pitch2[pitch2 > 0]
pitch2 = scipy_interp.pchip(np.nonzero(pitch2)[0],
nz_pitch)(range(self.nframes))
pitch[pitch == 0] = pitch2[pitch == 0]
if self.SMOOTH > 0:
pitch = medfilt(pitch, self.SMOOTH_FACTOR)
try:
if first_sample == 0:
pitch[:edges[0]-1] = pitch[edges[0]]
if last_sample == 0:
pitch[edges[-1]+1:] = pitch[edges[-1]]
except:
pass
self.samp_interp = pitch
"""
Upsample the pitch data so that its length becomes the same as that of the
speech signal.
"""
def upsample(self, samp_values, file_size, first_samp=0, last_samp=0,
interp_tech='pchip'):
if interp_tech == 'step':
beg_pad = int((self.noverlap)/2)
up_version = np.zeros((file_size))
up_version[:beg_pad] = first_samp
up_version[beg_pad:beg_pad+self.frame_jump*self.nframes] = \
np.repeat(samp_values, self.frame_jump)
up_version[beg_pad+self.frame_jump*self.nframes:] = last_samp
elif interp_tech in ['pchip', 'spline']:
if np.amin(samp_values) > 0:
if interp_tech == 'pchip':
up_version = scipy_interp.pchip(self.frames_pos,
samp_values)(range(file_size))
elif interp_tech == 'spline':
tck, u_original = scipy_interp.splprep(
[self.frames_pos, samp_values],
u=self.frames_pos)
up_version = scipy_interp.splev(range(file_size), tck)[1]
else:
beg_pad = int((self.noverlap)/2)
up_version = np.zeros((file_size))
up_version[:beg_pad] = first_samp
voiced_frames = np.nonzero(samp_values)[0]
edges = np.nonzero((voiced_frames[1:]-voiced_frames[:-1]) > 1)[0]
edges = np.insert(edges, len(edges), len(voiced_frames)-1)
voiced_frames = np.split(voiced_frames, edges+1)[:-1]
for frame in voiced_frames:
up_interval = self.frames_pos[frame]
tot_interval = np.arange(int(up_interval[0]-(self.frame_jump/2)),
int(up_interval[-1]+(self.frame_jump/2)))
if interp_tech == 'pchip' and len(frame) > 2:
up_version[tot_interval] = scipy_interp.pchip(
up_interval,
samp_values[frame])(tot_interval)
elif interp_tech == 'spline' and len(frame) > 3:
tck, u_original = scipy_interp.splprep(
[up_interval, samp_values[frame]],
u=up_interval)
up_version[tot_interval] = scipy_interp.splev(tot_interval, tck)[1]
# MD: In case len(frame)==2, above methods fail.
#Use linear interpolation instead.
elif len(frame) > 1:
up_version[tot_interval] = scipy_interp.interp1d(
up_interval,
samp_values[frame],
fill_value='extrapolate')(tot_interval)
elif len(frame) == 1:
up_version[tot_interval] = samp_values[frame]
up_version[beg_pad+self.frame_jump*self.nframes:] = last_samp
return up_version
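# Hypothetical usage note: the interpolation technique is chosen through the
# interp_tech argument of set_values()/upsample(), e.g.
#
#     pitch.set_values(final_pitch, signal.size, interp_tech='step')
#
# 'step' repeats each frame value, while 'pchip' (the default) and 'spline'
# fit smooth curves through the voiced frames only.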
"""
Creates a bandpass filter object.
"""
class BandpassFilter(object):
def __init__(self, fs, parameters):
fs_min = 1000.0
if (fs > fs_min):
dec_factor = parameters['dec_factor']
else:
dec_factor = 1
filter_order = parameters['bp_forder']
f_hp = parameters['bp_low']
f_lp = parameters['bp_high']
f1 = f_hp/(fs/2)
f2 = f_lp/(fs/2)
self.b = firwin(filter_order+1, [f1, f2], pass_zero=False)
self.a = 1
self.dec_factor = dec_factor
"""
--------------------------------------------
Main function.
--------------------------------------------
"""
def yaapt(signal, **kwargs):
# Rename the YAAPT v4.0 parameter "frame_lengtht" to "tda_frame_length"
# (if provided).
if 'frame_lengtht' in kwargs:
if 'tda_frame_length' in kwargs:
warning_str = 'WARNING: Both "tda_frame_length" and "frame_lengtht" '
warning_str += 'refer to the same parameter. Therefore, the value '
warning_str += 'of "frame_lengtht" is going to be discarded.'
print(warning_str)
else:
kwargs['tda_frame_length'] = kwargs.pop('frame_lengtht')
#---------------------------------------------------------------
# Set the default values for the parameters.
#---------------------------------------------------------------
parameters = {}
parameters['frame_length'] = kwargs.get('frame_length', 35.0) #Length of each analysis frame (ms)
# WARNING: In the original MATLAB YAAPT 4.0 code the next parameter is called
# "frame_lengtht" which is quite similar to the previous one "frame_length".
# Therefore, I've decided to rename it to "tda_frame_length" in order to
# avoid confusion between them. Nevertheless, both inputs ("frame_lengtht"
# and "tda_frame_length") are accepted when the function is called.
parameters['tda_frame_length'] = \
kwargs.get('tda_frame_length', 35.0) #Frame length employed in the time domain analysis (ms)
parameters['frame_space'] = kwargs.get('frame_space', 10.0) #Spacing between analysis frames (ms)
parameters['f0_min'] = kwargs.get('f0_min', 60.0) #Minimum F0 searched (Hz)
parameters['f0_max'] = kwargs.get('f0_max', 400.0) #Maximum F0 searched (Hz)
parameters['fft_length'] = kwargs.get('fft_length', 8192) #FFT length
parameters['bp_forder'] = kwargs.get('bp_forder', 150) #Order of band-pass filter
parameters['bp_low'] = kwargs.get('bp_low', 50.0) #Low frequency of filter passband (Hz)
parameters['bp_high'] = kwargs.get('bp_high', 1500.0) #High frequency of filter passband (Hz)
parameters['nlfer_thresh1'] = kwargs.get('nlfer_thresh1', 0.75) #NLFER boundary for voiced/unvoiced decisions
parameters['nlfer_thresh2'] = kwargs.get('nlfer_thresh2', 0.1) #Threshold for NLFER definitely unvoiced
parameters['shc_numharms'] = kwargs.get('shc_numharms', 3) #Number of harmonics in SHC calculation
parameters['shc_window'] = kwargs.get('shc_window', 40.0) #SHC window length (Hz)
parameters['shc_maxpeaks'] = kwargs.get('shc_maxpeaks', 4) #Maximum number of SHC peaks to be found
parameters['shc_pwidth'] = kwargs.get('shc_pwidth', 50.0) #Window width in SHC peak picking (Hz)
parameters['shc_thresh1'] = kwargs.get('shc_thresh1', 5.0) #Threshold 1 for SHC peak picking
parameters['shc_thresh2'] = kwargs.get('shc_thresh2', 1.25) #Threshold 2 for SHC peak picking
parameters['f0_double'] = kwargs.get('f0_double', 150.0) #F0 doubling decision threshold (Hz)
parameters['f0_half'] = kwargs.get('f0_half', 150.0) #F0 halving decision threshold (Hz)
parameters['dp5_k1'] = kwargs.get('dp5_k1', 11.0) #Weight used in dynamic program
parameters['dec_factor'] = kwargs.get('dec_factor', 1) #Factor for signal resampling
parameters['nccf_thresh1'] = kwargs.get('nccf_thresh1', 0.3) #Threshold for considering a peak in NCCF
parameters['nccf_thresh2'] = kwargs.get('nccf_thresh2', 0.9) #Threshold for terminating search in NCCF
parameters['nccf_maxcands'] = kwargs.get('nccf_maxcands', 3) #Maximum number of candidates found
parameters['nccf_pwidth'] = kwargs.get('nccf_pwidth', 5) #Window width in NCCF peak picking
parameters['merit_boost'] = kwargs.get('merit_boost', 0.20) #Boost merit
parameters['merit_pivot'] = kwargs.get('merit_pivot', 0.99) #Merit assigned to unvoiced candidates in
#definitely unvoiced frames
parameters['merit_extra'] = kwargs.get('merit_extra', 0.4) #Merit assigned to extra candidates
#in reducing F0 doubling/halving errors
parameters['median_value'] = kwargs.get('median_value', 7) #Order of median filter
parameters['dp_w1'] = kwargs.get('dp_w1', 0.15) #DP weight factor for V-V transitions
parameters['dp_w2'] = kwargs.get('dp_w2', 0.5) #DP weight factor for V-UV or UV-V transitions
parameters['dp_w3'] = kwargs.get('dp_w3', 0.1) #DP weight factor of UV-UV transitions
parameters['dp_w4'] = kwargs.get('dp_w4', 0.9) #Weight factor for local costs
# Exclusive from pYAAPT.
parameters['spec_pitch_min_std'] = kwargs.get('spec_pitch_min_std', 0.05)
#Weight factor that sets a minimum
#spectral pitch standard deviation,
#which is calculated as
#min_std = pitch_avg*spec_pitch_min_std
#---------------------------------------------------------------
# Create the signal objects and filter them.
#---------------------------------------------------------------
fir_filter = BandpassFilter(signal.fs, parameters)
nonlinear_sign = basic.SignalObj(signal.data**2, signal.fs)
signal.filtered_version(fir_filter)
nonlinear_sign.filtered_version(fir_filter)
#---------------------------------------------------------------
# Create the pitch object.
#---------------------------------------------------------------
nfft = parameters['fft_length']
frame_size = int(np.fix(parameters['frame_length']*signal.fs/1000))
frame_jump = int(np.fix(parameters['frame_space']*signal.fs/1000))
pitch = PitchObj(frame_size, frame_jump, nfft)
assert pitch.frame_size > 15, 'Frame length value {} is too short.'.format(pitch.frame_size)
assert pitch.frame_size < 2048, 'Frame length value {} exceeds the limit.'.format(pitch.frame_size)
#---------------------------------------------------------------
# Calculate NLFER and determine voiced/unvoiced frames.
#---------------------------------------------------------------
nlfer(signal, pitch, parameters)
#---------------------------------------------------------------
# Calculate an approximate pitch track from the spectrum.
#---------------------------------------------------------------
spec_pitch, pitch_std = spec_track(nonlinear_sign, pitch, parameters)
#---------------------------------------------------------------
# Temporal pitch tracking based on NCCF.
#---------------------------------------------------------------
time_pitch1, time_merit1 = time_track(signal, spec_pitch, pitch_std, pitch,
parameters)
time_pitch2, time_merit2 = time_track(nonlinear_sign, spec_pitch, pitch_std,
pitch, parameters)
# Added in YAAPT 4.0
if time_pitch1.shape[1] < len(spec_pitch):
len_time = time_pitch1.shape[1]
len_spec = len(spec_pitch)
time_pitch1 = np.concatenate((time_pitch1, np.zeros((3,len_spec-len_time),
dtype=time_pitch1.dtype)),axis=1)
time_pitch2 = np.concatenate((time_pitch2, np.zeros((3,len_spec-len_time),
dtype=time_pitch2.dtype)),axis=1)
time_merit1 = np.concatenate((time_merit1, np.zeros((3,len_spec-len_time),
dtype=time_merit1.dtype)),axis=1)
time_merit2 = np.concatenate((time_merit2, np.zeros((3,len_spec-len_time),
dtype=time_merit2.dtype)),axis=1)
#---------------------------------------------------------------
# Refine pitch candidates.
#---------------------------------------------------------------
ref_pitch, ref_merit = refine(time_pitch1, time_merit1, time_pitch2,
time_merit2, spec_pitch, pitch, parameters)
#---------------------------------------------------------------
# Use dynamic programming to determine the final pitch.
#---------------------------------------------------------------
final_pitch = dynamic(ref_pitch, ref_merit, pitch, parameters)
pitch.set_values(final_pitch, signal.size)
return pitch
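# Hypothetical usage sketch (assumes a mono WAV file readable by basic.SignalObj):
#
#     import amfm_decompy.basic_tools as basic
#     import amfm_decompy.pYAAPT as pYAAPT
#
#     signal = basic.SignalObj('speech.wav')
#     pitch = pYAAPT.yaapt(signal, f0_min=60.0, f0_max=400.0, frame_length=35.0)
#     f0_frames = pitch.samp_values    # one F0 estimate per analysis frame
#     f0_samples = pitch.values_interp # interpolated track, one value per signal sample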
"""
--------------------------------------------
Side functions.
--------------------------------------------
"""
"""
Normalized Low Frequency Energy Ratio function. Corresponds to the nlfer.m file,
but instead of returning the results to the calling function, encapsulates them
in the pitch object.
"""
def nlfer(signal, pitch, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
N_f0_min = np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft)
N_f0_max = np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft)
window = hann(pitch.frame_size+2)[1:-1]
data = np.zeros((signal.size)) #Needs other array, otherwise stride and
data[:] = signal.filtered #windowing will modify signal.filtered
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
signal.size-int(np.fix(float(pitch.frame_size)/2)),
pitch.frame_jump)
data_matrix = np.empty((len(samples), pitch.frame_size))
data_matrix[:, :] = stride_matrix(data, len(samples),
pitch.frame_size, pitch.frame_jump)
data_matrix *= window
specData = np.fft.rfft(data_matrix, pitch.nfft)
frame_energy = np.abs(specData[:, int(N_f0_min-1):int(N_f0_max)]).sum(axis=1)
pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
pitch.set_frames_pos(samples)
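# In short: for each frame, NLFER is the sum of the spectral magnitudes over the
# low-frequency band [2*f0_min, f0_max], divided by the mean of that sum across
# all frames; frames with NLFER above nlfer_thresh1 are marked as voiced.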
"""
Spectral pitch tracking. Computes estimates of pitch using nonlinearly processed
speech (typically square or absolute value) and frequency domain processing.
Search for frequencies which have energy at multiples of that frequency.
Corresponds to the spec_trk.m file.
"""
def spec_track(signal, pitch, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
nframe_size = pitch.frame_size*2
maxpeaks = parameters['shc_maxpeaks']
delta = signal.new_fs/pitch.nfft
window_length = int(np.fix(parameters['shc_window']/delta))
half_window_length = int(np.fix(float(window_length)/2))
if not(window_length % 2):
window_length += 1
max_SHC = int(np.fix((parameters['f0_max']+parameters['shc_pwidth']*2)/delta))
min_SHC = int(np.ceil(parameters['f0_min']/delta))
num_harmonics = parameters['shc_numharms']
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
cand_pitch = np.zeros((maxpeaks, pitch.nframes))
cand_merit = np.ones((maxpeaks, pitch.nframes))
data = np.append(signal.filtered,
np.zeros((1, nframe_size +
((pitch.nframes-1)*pitch.frame_jump-signal.size))))
#Compute SHC for voiced frame
window = kaiser(nframe_size, 0.5)
SHC = np.zeros((max_SHC))
row_mat_list = np.array([np.empty((max_SHC-min_SHC+1, window_length))
for x in range(num_harmonics+1)])
magnitude = np.zeros(int((half_window_length+(pitch.nfft/2)+1)))
for frame in np.where(pitch.vuv)[0].tolist():
fir_step = frame*pitch.frame_jump
data_slice = data[fir_step:fir_step+nframe_size]*window
data_slice -= np.mean(data_slice)
magnitude[half_window_length:] = np.abs(np.fft.rfft(data_slice,
pitch.nfft))
for idx,row_mat in enumerate(row_mat_list):
row_mat[:, :] = stride_matrix(magnitude[min_SHC*(idx+1):],
max_SHC-min_SHC+1,
window_length, idx+1)
SHC[min_SHC-1:max_SHC] = np.sum(np.prod(row_mat_list,axis=0),axis=1)
cand_pitch[:, frame], cand_merit[:, frame] = \
peaks(SHC, delta, maxpeaks, parameters)
#Extract the pitch candidates of voiced frames for the future pitch selection.
spec_pitch = cand_pitch[0, :]
voiced_cand_pitch = cand_pitch[:, cand_pitch[0, :] > 0]
voiced_cand_merit = cand_merit[:, cand_pitch[0, :] > 0]
num_voiced_cand = len(voiced_cand_pitch[0, :])
avg_voiced = np.mean(voiced_cand_pitch[0, :])
std_voiced = np.std(voiced_cand_pitch[0, :])
#Interpolation of the weighted candidates.
delta1 = abs((voiced_cand_pitch - 0.8*avg_voiced))*(3-voiced_cand_merit)
index = delta1.argmin(0)
voiced_peak_minmrt = voiced_cand_pitch[index, range(num_voiced_cand)]
voiced_merit_minmrt = voiced_cand_merit[index, range(num_voiced_cand)]
voiced_peak_minmrt = medfilt(voiced_peak_minmrt,
max(1, parameters['median_value']-2))
#Replace the lowest merit candidates by the median smoothed ones
#computed from highest merit peaks above.
voiced_cand_pitch[index, range(num_voiced_cand)] = voiced_peak_minmrt
voiced_cand_merit[index, range(num_voiced_cand)] = voiced_merit_minmrt
#Use dynamic programming to find the best overall path among pitch candidates.
#Dynamic weight for transition costs balance between local and
#transition costs.
weight_trans = parameters['dp5_k1']*std_voiced/avg_voiced
if num_voiced_cand > 2:
voiced_pitch = dynamic5(voiced_cand_pitch, voiced_cand_merit,
weight_trans, parameters['f0_min'])
voiced_pitch = medfilt(voiced_pitch, max(1, parameters['median_value']-2))
else:
if num_voiced_cand > 0:
voiced_pitch = (np.ones((num_voiced_cand)))*150.0
else:
voiced_pitch = np.array([150.0])
cand_pitch[0, 0] = 0
pitch_avg = np.mean(voiced_pitch)
pitch_std = np.maximum(np.std(voiced_pitch), pitch_avg*parameters['spec_pitch_min_std'])
spec_pitch[cand_pitch[0, :] > 0] = voiced_pitch[:]
if (spec_pitch[0] < pitch_avg/2):
spec_pitch[0] = pitch_avg
if (spec_pitch[-1] < pitch_avg/2):
spec_pitch[-1] = pitch_avg
spec_voiced = np.array(np.nonzero(spec_pitch)[0])
spec_pitch = scipy_interp.pchip(spec_voiced,
spec_pitch[spec_voiced])(range(pitch.nframes))
spec_pitch = lfilter(np.ones((3))/3, 1.0, spec_pitch)
spec_pitch[0] = spec_pitch[2]
spec_pitch[1] = spec_pitch[3]
return spec_pitch, pitch_std
"""
Temporal pitch tracking.
Corresponds to the tm_trk.m file.
"""
def time_track(signal, spec_pitch, pitch_std, pitch, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
tda_frame_length = int(parameters['tda_frame_length']*signal.fs/1000)
tda_noverlap = tda_frame_length-pitch.frame_jump
tda_nframes = int((len(signal.data)-tda_noverlap)/pitch.frame_jump)
len_spectral = len(spec_pitch)
if tda_nframes < len_spectral:
spec_pitch = spec_pitch[:tda_nframes]
elif tda_nframes > len_spectral:
tda_nframes = len_spectral
merit_boost = parameters['merit_boost']
maxcands = parameters['nccf_maxcands']
freq_thresh = 5.0*pitch_std
spec_range = np.maximum(spec_pitch-2.0*pitch_std, parameters['f0_min'])
spec_range = np.vstack((spec_range,
np.minimum(spec_pitch+2.0*pitch_std, parameters['f0_max'])))
time_pitch = np.zeros((maxcands, tda_nframes))
time_merit = np.zeros((maxcands, tda_nframes))
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
data = np.zeros((signal.size)) #Needs other array, otherwise stride and
data[:] = signal.filtered #windowing will modify signal.filtered
signal_frames = stride_matrix(data, tda_nframes,tda_frame_length,
pitch.frame_jump)
for frame in range(tda_nframes):
lag_min0 = (int(np.fix(signal.new_fs/spec_range[1, frame])) -
int(np.fix(parameters['nccf_pwidth']/2.0)))
lag_max0 = (int(np.fix(signal.new_fs/spec_range[0, frame])) +
int(np.fix(parameters['nccf_pwidth']/2.0)))
phi = crs_corr(signal_frames[frame, :], lag_min0, lag_max0)
time_pitch[:, frame], time_merit[:, frame] = \
cmp_rate(phi, signal.new_fs, maxcands, lag_min0, lag_max0, parameters)
diff = np.abs(time_pitch - spec_pitch)
match1 = (diff < freq_thresh)
match = ((1 - diff/freq_thresh) * match1)
time_merit = (((1+merit_boost)*time_merit) * match)
return time_pitch, time_merit
"""
Refines pitch candidates obtained from NCCF using spectral pitch track and
NLFER energy information.
Corresponds to the refine.m file.
"""
def refine(time_pitch1, time_merit1, time_pitch2, time_merit2, spec_pitch,
pitch, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
nlfer_thresh2 = parameters['nlfer_thresh2']
merit_pivot = parameters['merit_pivot']
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
time_pitch = np.append(time_pitch1, time_pitch2, 0)
time_merit = np.append(time_merit1, time_merit2, 0)
maxcands = time_pitch.shape[0]
idx = np.argsort(-time_merit, axis=0)
time_merit.sort(axis=0)
time_merit[:, :] = time_merit[::-1,:]
time_pitch = time_pitch[idx, range(pitch.nframes)]
best_pitch = medfilt(time_pitch[0, :], parameters['median_value'])*pitch.vuv
idx1 = pitch.energy <= nlfer_thresh2
idx2 = (pitch.energy > nlfer_thresh2) & (time_pitch[0, :] > 0)
idx3 = (pitch.energy > nlfer_thresh2) & (time_pitch[0, :] <= 0)
merit_mat = (time_pitch[1:maxcands-1, :] == 0) & idx2
merit_mat = np.insert(merit_mat, [0, maxcands-2],
np.zeros((1, pitch.nframes), dtype=bool), 0)
time_pitch[:, idx1] = 0
time_merit[:, idx1] = merit_pivot
time_pitch[maxcands-1, idx2] = 0.0
time_merit[maxcands-1, idx2] = 1.0-time_merit[0, idx2]
time_merit[merit_mat] = 0.0
time_pitch[0, idx3] = spec_pitch[idx3]
time_merit[0, idx3] = np.minimum(1, pitch.energy[idx3]/2.0)
time_pitch[1:maxcands, idx3] = 0.0
time_merit[1:maxcands, idx3] = 1.0-time_merit[0, idx3]
time_pitch[maxcands-2, :] = best_pitch
non_zero_frames = best_pitch > 0.0
time_merit[maxcands-2, non_zero_frames] = time_merit[0, non_zero_frames]
time_merit[maxcands-2, ~(non_zero_frames)] = 1.0-np.minimum(1,
pitch.energy[~(non_zero_frames)]/2.0)
time_pitch[maxcands-3, :] = spec_pitch
time_merit[maxcands-3, :] = pitch.energy/5.0
return time_pitch, time_merit
"""
Dynamic programming used to compute local and transition cost matrices,
enabling the lowest cost tracking of pitch candidates.
It uses NLFER from the spectrogram and the highly robust spectral F0 track,
plus the merits, for computation of the cost matrices.
Corresponds to the dynamic.m file.
"""
def dynamic(ref_pitch, ref_merit, pitch, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
num_cands = ref_pitch.shape[0]
best_pitch = ref_pitch[num_cands-2, :]
mean_pitch = np.mean(best_pitch[best_pitch > 0])
dp_w1 = parameters['dp_w1']
dp_w2 = parameters['dp_w2']
dp_w3 = parameters['dp_w3']
dp_w4 = parameters['dp_w4']
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
local_cost = 1 - ref_merit
trans_cmatrix = np.ones((num_cands, num_cands, pitch.nframes))
ref_mat1 = np.zeros((num_cands, num_cands, pitch.nframes))
ref_mat2 = np.zeros((num_cands, num_cands, pitch.nframes))
idx_mat1 = np.zeros((num_cands, num_cands, pitch.nframes), dtype=bool)
idx_mat2 = np.zeros((num_cands, num_cands, pitch.nframes), dtype=bool)
idx_mat3 = np.zeros((num_cands, num_cands, pitch.nframes), dtype=bool)
ref_mat1[:, :, 1:] = np.tile(ref_pitch[:, 1:].reshape(1, num_cands,
pitch.nframes-1), (num_cands, 1, 1))
ref_mat2[:, :, 1:] = np.tile(ref_pitch[:, :-1].reshape(num_cands, 1,
pitch.nframes-1), (1, num_cands, 1))
idx_mat1[:, :, 1:] = (ref_mat1[:, :, 1:] > 0) & (ref_mat2[:, :, 1:] > 0)
idx_mat2[:, :, 1:] = (((ref_mat1[:, :, 1:] == 0) & (ref_mat2[:, :, 1:] > 0)) |
((ref_mat1[:, :, 1:] > 0) & (ref_mat2[:, :, 1:] == 0)))
idx_mat3[:, :, 1:] = (ref_mat1[:, :, 1:] == 0) & (ref_mat2[:, :, 1:] == 0)
mat1_values = np.abs(ref_mat1-ref_mat2)/mean_pitch
benefit2 = np.insert(np.minimum(1, abs(pitch.energy[:-1]-pitch.energy[1:])),
0, 0)
benefit2 = np.tile(benefit2, (num_cands, num_cands, 1))
trans_cmatrix[idx_mat1] = dp_w1*mat1_values[idx_mat1]
trans_cmatrix[idx_mat2] = dp_w2*(1-benefit2[idx_mat2])
trans_cmatrix[idx_mat3] = dp_w3
trans_cmatrix = trans_cmatrix/dp_w4
path = path1(local_cost, trans_cmatrix, num_cands, pitch.nframes)
final_pitch = ref_pitch[path, range(pitch.nframes)]
return final_pitch
"""
--------------------------------------------
Auxiliary functions.
--------------------------------------------
"""
"""
Computes peaks in a frequency domain function associated with the peaks found
in each frame based on the correlation sequence.
Corresponds to the peaks.m file.
"""
def peaks(data, delta, maxpeaks, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
PEAK_THRESH1 = parameters['shc_thresh1']
PEAK_THRESH2 = parameters['shc_thresh2']
epsilon = .00000000000001
width = int(np.fix(parameters['shc_pwidth']/delta))
if not(float(width) % 2):
width = width + 1
center = int(np.ceil(width/2))
min_lag = int(np.fix(parameters['f0_min']/delta - center))
max_lag = int(np.fix(parameters['f0_max']/delta + center))
if (min_lag < 1):
min_lag = 1
print('Min_lag is too low and adjusted ({}).'.format(min_lag))
if max_lag > (len(data) - width):
max_lag = len(data) - width
print('Max_lag is too high and adjusted ({}).'.format(max_lag))
pitch = np.zeros((maxpeaks))
merit = np.zeros((maxpeaks))
#---------------------------------------------------------------
# Main routine.
#---------------------------------------------------------------
max_data = max(data[min_lag:max_lag+1])
if (max_data > epsilon):
data = data/max_data
avg_data = np.mean(data[min_lag:max_lag+1])
if (avg_data > 1/PEAK_THRESH1):
pitch = np.zeros((maxpeaks))
merit = np.ones((maxpeaks))
return pitch, merit
#---------------------------------------------------------------
#Step1 (this step was implemented differently than in original version)
#---------------------------------------------------------------
numpeaks = 0
vec_back = (data[min_lag+center+1:max_lag-center+1] >
data[min_lag+center:max_lag-center])
vec_forw = (data[min_lag+center+1:max_lag-center+1] >
data[min_lag+center+2:max_lag-center+2])
above_thresh = (data[min_lag+center+1:max_lag-center+1] >
PEAK_THRESH2*avg_data)
peaks = np.logical_and(np.logical_and(vec_back, vec_forw), above_thresh)
for n in (peaks.ravel().nonzero()[0]+min_lag+center+1).tolist():
if np.argmax(data[n-center:n+center+1]) == center:
if numpeaks >= maxpeaks:
pitch = np.append(pitch, np.zeros((1)))
merit = np.append(merit, np.zeros((1)))
pitch[numpeaks] = float(n)*delta
merit[numpeaks] = data[n]
numpeaks += 1
#---------------------------------------------------------------
#Step2
#---------------------------------------------------------------
if (max(merit)/avg_data < PEAK_THRESH1):
pitch = np.zeros((maxpeaks))
merit = np.ones((maxpeaks))
return pitch, merit
#---------------------------------------------------------------
#Step3
#---------------------------------------------------------------
idx = (-merit).ravel().argsort().tolist()
merit = merit[idx]
pitch = pitch[idx]
numpeaks = min(numpeaks, maxpeaks)
pitch = np.append(pitch[:numpeaks], np.zeros((maxpeaks-numpeaks)))
merit = np.append(merit[:numpeaks], np.zeros((maxpeaks-numpeaks)))
#---------------------------------------------------------------
#Step4
#---------------------------------------------------------------
if (0 < numpeaks < maxpeaks):
pitch[numpeaks:maxpeaks] = pitch[0]
merit[numpeaks:maxpeaks] = merit[0]
else:
pitch = np.zeros((maxpeaks))
merit = np.ones((maxpeaks))
return np.transpose(pitch), np.transpose(merit)
"""
Dynamic programming used to compute local and transition cost matrices,
enabling the lowest cost tracking of pitch candidates.
It uses NLFER from the spectrogram and the highly robust spectral F0 track,
plus the merits, for computation of the cost matrices.
Corresponds to the dynamic5.m file.
"""
def dynamic5(pitch_array, merit_array, k1, f0_min):
num_cand = pitch_array.shape[0]
num_frames = pitch_array.shape[1]
local = 1-merit_array
trans = np.zeros((num_cand, num_cand, num_frames))
trans[:, :, 1:] = abs(pitch_array[:, 1:].reshape(1, num_cand, num_frames-1) -
pitch_array[:, :-1].reshape(num_cand, 1, num_frames-1))/f0_min
trans[:, :, 1:] = 0.05*trans[:, :, 1:] + trans[:, :, 1:]**2
trans = k1*trans
path = path1(local, trans, num_cand, num_frames)
final_pitch = pitch_array[path, range(num_frames)]
return final_pitch
"""
Finds the optimal path with the lowest cost if two matrices (local cost matrix
and transition cost matrix) are given.
Corresponds to the path1.m file.
"""
def path1(local, trans, n_lin, n_col):
# Apparently the following lines are somehow kind of useless.
# Therefore, I removed them in the version 1.0.3.
# if n_lin >= 100:
# print 'Stop in Dynamic due to M>100'
# raise KeyboardInterrupt
#
# if n_col >= 1000:
# print 'Stop in Dynamic due to N>1000'
# raise KeyboardInterrupt
PRED = np.zeros((n_lin, n_col), dtype=int)
P = np.ones((n_col), dtype=int)
p_small = np.zeros((n_col), dtype=int)
PCOST = np.zeros((n_lin))
CCOST = np.zeros((n_lin))
PCOST = local[:, 0]
for I in range(1, n_col):
aux_matrix = PCOST+np.transpose(trans[:, :, I])
K = n_lin-np.argmin(aux_matrix[:, ::-1], axis=1)-1
PRED[:, I] = K
CCOST = PCOST[K]+trans[K, range(n_lin), I]
assert (CCOST < 1.0E+30).all(), 'CCOST>1.0E+30, Stop in Dynamic'
CCOST = CCOST+local[:, I]
PCOST[:] = CCOST
J = n_lin - np.argmin(CCOST[::-1])-1
p_small[I] = J
P[-1] = p_small[-1]
for I in range(n_col-2, -1, -1):
P[I] = PRED[P[I+1], I+1]
return P
"""
Computes the NCCF (Normalized cross correlation Function) sequence based on
the RAPT algorithm discussed by DAVID TALKIN.
Corresponds to the crs_corr.m file.
"""
def crs_corr(data, lag_min, lag_max):
eps1 = 0.0
data_len = len(data)
N = data_len-lag_max
error_str = 'ERROR: Negative index in the cross correlation calculation of '
error_str += 'the pYAAPT time domain analysis. Please try to increase the '
error_str += 'value of the "tda_frame_length" parameter.'
assert N>0, error_str
phi = np.zeros((data_len))
data -= np.mean(data)
x_j = data[0:N]
x_jr = data[lag_min:lag_max+N]
p = np.dot(x_j, x_j)
x_jr_matrix = stride_matrix(x_jr, lag_max-lag_min, N, 1)
formula_nume = np.dot(x_jr_matrix, x_j)
formula_denom = np.sum(x_jr_matrix*x_jr_matrix, axis=1)*p + eps1
phi[lag_min:lag_max] = formula_nume/np.sqrt(formula_denom)
return phi
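# In short: for each lag k in [lag_min, lag_max), phi[k] is the normalized
# cross correlation
#
#     phi[k] = sum_j x[j]*x[j+k] / sqrt( sum_j x[j]**2 * sum_j x[j+k]**2 )
#
# computed over the first N = len(data) - lag_max samples of the mean-removed frame.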
"""
Computes pitch estimates and the corresponding merit values associated with the
peaks found in each frame based on the correlation sequence.
Corresponds to the cmp_rate.m file.
"""
def cmp_rate(phi, fs, maxcands, lag_min, lag_max, parameters):
#---------------------------------------------------------------
# Set parameters.
#---------------------------------------------------------------
width = parameters['nccf_pwidth']
center = int(np.fix(width/2.0))
merit_thresh1 = parameters['nccf_thresh1']
merit_thresh2 = parameters['nccf_thresh2']
numpeaks = 0
pitch = np.zeros((maxcands))
merit = np.zeros((maxcands))
#---------------------------------------------------------------
# Main routine.
#(this step was implemented differently than in original version)
#---------------------------------------------------------------
vec_back = (phi[lag_min+center:lag_max-center+1] >
phi[lag_min+center-1:lag_max-center])
vec_forw = (phi[lag_min+center:lag_max-center+1] >
phi[lag_min+center+1:lag_max-center+2])
above_thresh = phi[lag_min+center:lag_max-center+1] > merit_thresh1
peaks = np.logical_and(np.logical_and(vec_back, vec_forw), above_thresh)
peaks = (peaks.ravel().nonzero()[0]+lag_min+center).tolist()
if np.amax(phi) > merit_thresh2 and len(peaks) > 0:
max_point = peaks[np.argmax(phi[peaks])]
pitch[numpeaks] = fs/float(max_point+1)
merit[numpeaks] = np.amax(phi[peaks])
numpeaks += 1
else:
for n in peaks:
if np.argmax(phi[n-center:n+center+1]) == center:
try:
pitch[numpeaks] = fs/float(n+1)
merit[numpeaks] = phi[n]
except IndexError:
pitch = np.hstack((pitch, fs/float(n+1)))
merit = np.hstack((merit, phi[n]))
numpeaks += 1
#---------------------------------------------------------------
# Sort the results.
#---------------------------------------------------------------
idx = (-merit).ravel().argsort().tolist()
merit = merit[idx[:maxcands]]
pitch = pitch[idx[:maxcands]]
if (np.amax(merit) > 1.0):
merit = merit/np.amax(merit)
return pitch, merit
"""
--------------------------------------------
Extra functions.
--------------------------------------------
"""
def stride_matrix(vector, n_lin, n_col, hop):
data_matrix = stride_tricks.as_strided(vector, shape=(n_lin, n_col),
strides=(vector.strides[0]*hop, vector.strides[0]))
return data_matrix
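# Worked example (hypothetical values): stride_matrix(np.arange(6), 3, 3, 1)
# returns the overlapping frames [[0, 1, 2], [1, 2, 3], [2, 3, 4]] as a view
# (no copy) of the input vector.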
/MCRAMP-0.0.3-py3-none-any.whl/mcramp/scat/rescal.py | from .sprim import SPrim
import numpy as np
import pyopencl as cl
import pyopencl.array as clarr
import os
class SRescal(SPrim):
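# Descriptive note (inferred from the code below): this scattering component
# records, for each detected neutron, the momentum transfer components
# (qx, qy, qz), the energy transfer dE and the neutron weight p, which are
# accumulated across calls to data_reduce() for later resolution analysis.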
def __init__(self, target=[0,0,0], E0=0, dE=0, focus_r=0, idx=0, ctx=None, **kwargs):
self.idx = np.uint32(idx)
self.ctx = ctx
self.target = np.array((target[0], target[1], target[2], 0.),
dtype=clarr.vec.float3)
self.E0=np.float32(E0)
self.dE=np.float32(dE)
self.focus_r=np.float32(focus_r)
self.qz_vals=np.array([])
self.qy_vals=np.array([])
self.qx_vals=np.array([])
self.dE_vals=np.array([])
self.p_vals =np.array([])
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rescal.cl'), mode='r') as f:
self.prg = cl.Program(ctx, f.read()).build(options=r'-I "{}/include"'.format(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
def scatter_prg(self, queue, N, neutron_buf, intersection_buf, iidx_buf):
self.events = np.zeros((N,), dtype=clarr.vec.float8)
mf = cl.mem_flags
self.events_cl = cl.Buffer(self.ctx,
mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=self.events)
self.neutron_buf = neutron_buf
self.N = N
self.prg.rescal(queue, (N, ),
None,
neutron_buf,
intersection_buf,
iidx_buf,
self.idx,
self.target,
self.E0,
self.dE,
self.focus_r,
self.events_cl)
def data_reduce(self, queue):
neutrons = np.zeros((self.N, ), dtype=clarr.vec.float16)
cl.enqueue_copy(queue, self.events, self.events_cl)
cl.enqueue_copy(queue, neutrons, self.neutron_buf).wait()
eventlist_reduced = self.events[np.where(neutrons["s14"] > 0.0)]
self.qz_vals=np.concatenate((self.qz_vals, eventlist_reduced["s2"]))
self.qy_vals=np.concatenate((self.qy_vals, eventlist_reduced["s1"]))
self.qx_vals=np.concatenate((self.qx_vals, eventlist_reduced["s0"]))
self.dE_vals=np.concatenate((self.dE_vals, eventlist_reduced["s7"]))
self.p_vals =np.concatenate((self.p_vals, eventlist_reduced["s6"]))
def data(self, queue):
return (self.qx_vals, self.qy_vals, self.qz_vals, self.dE_vals, self.p_vals)
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_dua.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"idi\u0253a",
"eby\u00e1mu"
],
"DAY": [
"\u00e9ti",
"m\u0254\u0301s\u00fa",
"kwas\u00fa",
"muk\u0254\u0301s\u00fa",
"\u014bgis\u00fa",
"\u0257\u00f3n\u025bs\u00fa",
"esa\u0253as\u00fa"
],
"MONTH": [
"dim\u0254\u0301di",
"\u014bg\u0254nd\u025b",
"s\u0254\u014b\u025b",
"di\u0253\u00e1\u0253\u00e1",
"emiasele",
"es\u0254p\u025bs\u0254p\u025b",
"madi\u0253\u025b\u0301d\u00ed\u0253\u025b\u0301",
"di\u014bgindi",
"ny\u025bt\u025bki",
"may\u00e9s\u025b\u0301",
"tin\u00edn\u00ed",
"el\u00e1\u014bg\u025b\u0301"
],
"SHORTDAY": [
"\u00e9t",
"m\u0254\u0301s",
"kwa",
"muk",
"\u014bgi",
"\u0257\u00f3n",
"esa"
],
"SHORTMONTH": [
"di",
"\u014bg\u0254n",
"s\u0254\u014b",
"di\u0253",
"emi",
"es\u0254",
"mad",
"di\u014b",
"ny\u025bt",
"may",
"tin",
"el\u00e1"
],
"fullDate": "EEEE d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y HH:mm:ss",
"mediumDate": "d MMM y",
"mediumTime": "HH:mm:ss",
"short": "d/M/y HH:mm",
"shortDate": "d/M/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "FCFA",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "dua",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/DendroPy_calver-2023.330.2-py3-none-any.whl/dendropy/interop/paup.py |
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Wrapper around calls to PAUP*, mainly for testing purposes rather than analysis.
"""
import os
import sys
import subprocess
import tempfile
import re
import csv
import dendropy
from dendropy.utility import textprocessing
from dendropy.utility import error
from dendropy.utility import metavar
from dendropy.utility import container
from dendropy.utility import messaging
from dendropy.utility import filesys
from dendropy.utility import processio
from dendropy.dataio import nexuswriter
_LOG = messaging.get_logger(__name__)
import dendropy
PAUP_PATH = os.environ.get(metavar.DENDROPY_PAUP_PATH_ENVAR, "paup")
if PAUP_PATH == "NONE":
DENDROPY_PAUP_INTEROPERABILITY = False
else:
DENDROPY_PAUP_INTEROPERABILITY = True
STANDARD_PREAMBLE = "set warnreset=no increase=auto warnroot=no warnReset=no warnTree=no warnTSave=no warnBlkName=no errorStop=no errorBeep=no queryBeep=no"
class PaupService(object):
@staticmethod
def call(
paup_commands,
suppress_standard_preamble=False,
ignore_error_returncode=False,
ignore_nonempty_stderr=False,
strip_extraneous_prompts_from_stdout=True,
strip_extraneous_prompts_from_stderr=True,
cwd=None,
env=None,
paup_path=PAUP_PATH,
timeout=None,
):
"""
Executes a sequence of commands in PAUP* and returns the results.
Parameters
----------
paup_commands : iterable of strings
A list or some other iterable of strings representing PAUP
commands.
suppress_standard_preamble : bool
If |True|, then the command sequence will not be prefaced by the
standard preamble.
ignore_error_returncode : bool
If |True|, then a non-0 return code from the PAUP process will not
result in an exception being raised.
ignore_nonempty_stderr : bool
If |True|, then the PAUP process writing to standard error will not
result in an exception being raised.
strip_extraneous_prompts_from_stdout : bool
If |True|, then all occurrences of 'paup>' will be removed from the
standard output contents.
strip_extraneous_prompts_from_stderr : bool
If |True|, then all occurrences of 'paup>' will be removed from the
standard error contents.
cwd : string
Set the working directory of the PAUP* process to this directory.
env : dictionary
Environmental variables to set for the PAUP* process.
paup_path : string
Path to the PAUP* executable.
Returns
-------
returncode : exit value of PAUP process.
stdout : string
Contents of the PAUP process standard output.
stderr : string
Contents of the PAUP process standard error.
"""
if textprocessing.is_str_type(paup_commands):
commands = [paup_commands]
else:
commands = list(paup_commands)
if not suppress_standard_preamble:
commands.insert(0, STANDARD_PREAMBLE)
commands.append("quit")
paup_block = ";\n".join(commands) + ";\n"
invocation_command = [paup_path, "-n", "-u"]
p = subprocess.Popen(
invocation_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env,
)
raw_stdout, raw_stderr = processio.communicate(p, paup_block, timeout=timeout)
# try:
# raw_stdout, raw_stderr = processio.communicate(p, paup_block, timeout=timeout)
# except TypeError as e:
# raise
# if str(e) == "communicate() got an unexpected keyword argument 'timeout'":
# raw_stdout, raw_stderr = processio.communicate(p, paup_block)
# else:
# raise
stdout = raw_stdout
stderr = raw_stderr
if strip_extraneous_prompts_from_stdout:
# weird dev/paup error ... lots of prompts spring up
stdout = stdout.replace("paup>", "")
if strip_extraneous_prompts_from_stderr:
# weird dev/paup error ... lots of prompts spring up
stderr = stderr.replace("paup>", "")
chk_stderr = stderr
else:
chk_stderr = stderr.replace("paup>", "")
if (p.returncode != 0 and not ignore_error_returncode) or (chk_stderr != "" and not ignore_nonempty_stderr):
raise error.ExternalServiceError(
service_name="PAUP*",
invocation_command=invocation_command,
service_input=paup_block,
returncode = p.returncode,
stdout=raw_stdout,
stderr=raw_stderr)
return p.returncode, stdout, stderr
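# Hypothetical usage sketch (assumes a PAUP* binary on the PATH and an existing
# NEXUS file "data.nex"):
#
#     rc, out, err = PaupService.call([
#         "execute data.nex",
#         "hsearch",
#         "savetrees file=result.tre replace=yes",
#     ])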
@staticmethod
def bipartition_groups_to_split_bitmask(group_string, normalized=None):
"""
This converts a PAUP* group representation (i.e. a string of asterisks
and periods, where the asterisks denote the taxon index counting from
left to right) to a mask representation:
- a clade mask, where 1's represent descendents of the split/edge
(with taxon index counting from right to left, i.e., first taxon
is right-most bit)
- a split mask, an unrooted normalized version of the above, where
if the right most bit is not 1 the clade mask is complemented
(and not changed otherwise).
"""
group_string = group_string[::-1] # flip to get correct orientation
split_bitmask = int(group_string.replace("*", "1").replace(".", "0"), 2)
if normalized:
mask=((2 ** len(group_string)) -1)
return container.NormalizedBitmaskDict.normalize(split_bitmask, mask, 1)
else:
return split_bitmask
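# Worked example (hypothetical values): for the PAUP* group string "**.." over
# four taxa, the string is reversed to "..**", read as the binary number 0011,
# and the method returns the split bitmask 3 (taxa 1 and 2 set).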
def __init__(self,
suppress_standard_preamble=False,
ignore_error_returncode=False,
strip_extraneous_prompts_from_stderr=True,
strip_extraneous_prompts_from_stdout=True,
cwd=None,
env=None,
paup_path=PAUP_PATH):
self.suppress_standard_preamble = suppress_standard_preamble
self.ignore_error_returncode = ignore_error_returncode
self.strip_extraneous_prompts_from_stderr = strip_extraneous_prompts_from_stderr
self.strip_extraneous_prompts_from_stdout = strip_extraneous_prompts_from_stdout
self.cwd = cwd
self.env = env
self.paup_path = paup_path
self._nexus_writer = nexuswriter.NexusWriter()
self.commands = []
def count_splits_from_files(self,
tree_filepaths=None,
is_rooted=None,
use_tree_weights=None,
burnin=None,
taxa_definition_filepath=None,
taxon_namespace=None):
"""
Counts splits (bipartitions) in trees from files and returns the results.
Parameters
----------
tree_filepaths : iterable of strings
A list or some other iterable of file paths containing trees in
NEXUS format.
is_rooted : bool
If |True| then trees will be treated as rooted. If |False|, then
rooting follows that specified in the tree statements, defaulting
to unrooted if not specified.
use_tree_weights : bool
If |False| then tree weighting statements are disregarded.
Otherwise, they will be regarded.
burnin : integer
Skip this many trees (from the beginning of each source).
taxa_definition_filepath : str
Path of file containing TAXA block to execute. This is crucial to
getting the taxon order (and hence, indexes, and hence, split
bitmasks) correct. If not given, will use the first file
given in ``tree_filepaths``.
taxon_namespace : |TaxonNamespace|
The |TaxonNamespace| object to populate.
Returns
-------
d : dictionary
A dictionary with the following keys and values:
- "bipartition_counts" : dictionary with split bitmasks as keys
and (weighted) counts of occurrences as values
- "bipartition_frequencies" : dictionary with split bitmasks as keys
and (weighted) proportional frequencies of occurrences as values
- "num_trees" : number of trees counted
- "taxon_namespace" : |TaxonNamespace| instance
corresponding to the taxa <=> split bitmask mapping
- "is_rooted" : indicates whether the trees were rooted or not
"""
self.commands = []
if taxa_definition_filepath is not None:
self.stage_execute_file(
taxa_definition_filepath,
clear_trees=True)
self.stage_load_trees(
tree_filepaths=tree_filepaths,
is_rooted=is_rooted,
use_tree_weights=use_tree_weights,
burnin=burnin,
mode=7)
self.stage_list_taxa()
self.stage_tree_info()
self.stage_count_splits(use_tree_weights=use_tree_weights)
# print("\n".join(self.commands))
returncode, stdout, stderr = self._execute_command_sequence()
# print("\n".join(stdout))
taxon_namespace = self.parse_taxon_namespace(stdout,
taxon_namespace=taxon_namespace)
is_rooted = self.parse_is_tree_rooted(stdout)
tree_count, bipartition_counts, bipartition_freqs = self.parse_group_freqs(stdout, is_rooted=is_rooted)
d = {
"num_trees" : tree_count,
"bipartition_counts" : bipartition_counts,
"bipartition_freqs" : bipartition_freqs,
"taxon_namespace" : taxon_namespace,
"is_rooted" : is_rooted,
}
return d
def get_split_distribution_from_files(self,
tree_filepaths=None,
is_rooted=None,
use_tree_weights=None,
burnin=None,
taxa_definition_filepath=None,
taxon_namespace=None,
split_distribution=None):
"""
Returns a SplitDistribution object based on splits given in
tree files.
tree_filepaths : iterable of strings
A list or some other iterable of file paths containing trees in
NEXUS format.
is_rooted : bool
If |True| then trees will be treated as rooted. If |False|, then
rooting follows that specified in the tree statements, defaulting
to unrooted if not specified.
use_tree_weights : bool
If |False| then tree weighting statements are disregarded.
Otherwise, they will be regarded.
burnin : integer
            Skip this many trees (from the beginning of each source).
taxa_definition_filepath : str
Path of file containing TAXA block to execute. This is crucial to
getting the taxon order (and hence, indexes, and hence, split
bitmasks) correct. If not given, will use the first file
given in ``tree_filepaths``.
taxon_namespace : |TaxonNamespace|
|TaxonNamespace| object to use.
split_distribution : `SplitDistribution`
            `SplitDistribution` object to use.

        Returns
        -------
        s : `SplitDistribution`
            The `SplitDistribution` object populated with the split counts.
"""
if split_distribution is None:
split_distribution = dendropy.SplitDistribution(taxon_namespace=taxon_namespace)
taxon_namespace = split_distribution.taxon_namespace
else:
if taxon_namespace is None:
taxon_namespace = split_distribution.taxon_namespace
else:
assert split_distribution.taxon_namespace is taxon_namespace
result = self.count_splits_from_files(
tree_filepaths=tree_filepaths,
is_rooted=is_rooted,
use_tree_weights=use_tree_weights,
burnin=burnin,
taxa_definition_filepath=taxa_definition_filepath,
taxon_namespace=taxon_namespace)
for split in result["bipartition_counts"]:
if not is_rooted:
sd_split_key = split_distribution.normalize_bitmask(split)
else:
sd_split_key = split
split_distribution.add_split_count(sd_split_key, result["bipartition_counts"][split])
split_distribution.total_trees_counted = result["num_trees"]
return split_distribution
def stage_execute_file(self,
filepath,
clear_trees=False):
"""Executes file, optionally clearing trees from file if requested"""
self.commands.append("execute {}".format(filepath))
if clear_trees:
self.commands.append("cleartrees")
        return self.commands
def stage_load_trees(self,
tree_filepaths,
is_rooted=None,
use_tree_weights=None,
burnin=None,
mode=7): # keep trees in memory, specify 3 to clear
"""
Composes commands to load a set of trees into PAUP*, with the specified
number of burnin dropped.
"""
if textprocessing.is_str_type(tree_filepaths):
raise Exception("expecting list of filepaths, not string")
if is_rooted is None:
rooting = ""
elif is_rooted:
rooting = "rooted=yes"
else:
rooting = "unrooted=yes"
if use_tree_weights is None:
treewts = ""
elif use_tree_weights:
treewts = "storetreewts=yes"
else:
treewts = "storetreewts=no"
if burnin is None:
burnin = 0
gettree_template = "gett file= '{{tree_filepath}}' storebrlens=yes warntree=no {rooting} {treewts} from={burnin} mode={mode};".format(
rooting=rooting,
treewts=treewts,
burnin=burnin+1,
mode=mode)
for tree_filepath in tree_filepaths:
# self.commands.append(gettree_template.format(tree_filepath=tree_filepath))
# using relpath because of a bug in PAUP* 4.0b10 with long paths passed to gettrees
self.commands.append(gettree_template.format(tree_filepath=os.path.relpath(tree_filepath)))
return self.commands
def stage_list_taxa(self):
"""
Given a data file in memory, this gets PAUP* to print a list of
taxa that can be used to build a TaxaBlock later.
"""
# self.commands.append("[!TAXON LIST BEGIN]\ntstatus / full;\n[!TAXON LIST END]\n")
self.commands.append("[!TAXON LIST BEGIN]\ntstatus / full;\n[!TAXON LIST END]\n")
return self.commands
def stage_tree_info(self):
self.commands.append("[!TREE INFO BEGIN]treeinfo;\n[!TREE INFO END]\n")
return self.commands
def stage_count_splits(self,
use_tree_weights=None,
majrule_filepath=None,
majrule_freq=0.5):
"""
Given trees in memory, this composes a command to count the split
frequencies across the trees as well as a save the majority-rule
consensus tree if a path is given.
"""
percent = int(100 * majrule_freq)
if majrule_filepath is None:
treefile = ""
else:
treefile = " treefile={filepath} replace=yes "
if use_tree_weights is None:
treewts = ""
elif use_tree_weights:
treewts = "usetreewts=yes"
else:
treewts = "usetreewts=no"
commands = []
commands.append("[!SPLITS COUNT BEGIN]")
commands.append("contree / strict=no {treefile} showtree=no grpfreq=yes majrule=yes percent={percent} {treewts}".format(
treefile=treefile,
percent=percent,
treewts=treewts))
commands.append("[!SPLITS COUNT END]")
self.commands.extend(commands)
return self.commands
def stage_execute_file(self, filepath, clear_trees=False):
"""Executes file, optionally clearing trees from file if requested"""
self.commands.append("execute '{}'".format(filepath))
if clear_trees:
self.commands.append("cleartrees")
return self.commands
##############################################################################
## Processing of Output
def parse_taxon_namespace(self, paup_output, taxon_namespace=None):
"""
Given PAUP* output that includes a taxon listing as produced by
``stage_list_taxa``, this parses out and returns a taxon block.
"""
taxlabels = []
taxinfo_pattern = re.compile(r'\s*(\d+) (.*)\s+\-')
idx = 0
for line in paup_output:
idx += 1
if line == "TAXON LIST BEGIN":
break
for line in paup_output[idx:]:
if line == "TAXON LIST END":
break
ti_match = taxinfo_pattern.match(line)
if ti_match:
label = ti_match.group(2).strip()
taxlabels.append(label)
if taxon_namespace is None:
taxon_namespace = dendropy.TaxonNamespace()
for taxlabel in taxlabels:
taxon_namespace.require_taxon(label=taxlabel)
return taxon_namespace
def parse_is_tree_rooted(self, paup_output):
"""
Given PAUP* output that includes a information produced by
``stage_tree_info``, this parses out and returns the rooting
state of trees in memory
"""
pattern = re.compile(r'\d+ (\w+) trees in memory')
for line in paup_output:
if line == "TREE INFO END":
break
match = pattern.match(line)
if match:
s = match.groups(1)[0]
if s == "unrooted":
return False
elif s == "rooted":
return True
else:
return None
raise Exception("Unable to find tree information")
def parse_group_freqs(self, paup_output, is_rooted=None):
"""
Given PAUP* output that includes a split counting procedure, this
collects the splits and returns a dictionary of split bitmasks and their
frequencies.
"""
bipartitions = []
bipartition_freqs = {}
bipartition_counts = {}
tree_count = None
tree_count_pattern = re.compile(r'.*Majority-rule consensus of ([\d]*) tree.*', re.I)
bipartition_section = re.compile(r'Bipartitions found in one or more trees and frequency of occurrence:')
bp_full_row_with_perc_col = re.compile(r'([\.|\*]+)\s+([\d\.]+)\s+([\d\.]*)%')
bp_full_row_with_no_perc_col = re.compile(r'([\.|\*]+)\s+([\d\.]+)')
bp_row = re.compile(r'([\.|\*]+).*')
# find tree count
for idx, line in enumerate(paup_output):
tp_match = tree_count_pattern.match(line)
if tp_match:
break
if not tp_match:
raise Exception("Failed to find tree count in PAUP* output")
tree_count = int(tp_match.group(1))
while not bp_row.match(paup_output[idx]):
idx += 1
split_idx = 0
split_reps = {}
for line in paup_output[idx:]:
if line == "SPLITS COUNT END":
break
bp_match = bp_full_row_with_perc_col.match(line)
if not bp_match:
bp_match = bp_full_row_with_no_perc_col.match(line)
if bp_match:
# full row, or end of partial rows
if len(split_reps) == 0:
split_rep = bp_match.group(1)
else:
split_rep = split_reps[split_idx] + bp_match.group(1)
split_bitmask = PaupService.bipartition_groups_to_split_bitmask(split_rep, normalized=not is_rooted)
bipartition_counts[split_bitmask] = float(bp_match.group(2))
try:
bipartition_freqs[split_bitmask] = float(bp_match.group(3)) / 100
except IndexError:
bipartition_freqs[split_bitmask] = bipartition_counts[split_bitmask] / 100
split_idx += 1
else:
# either (1) partial row or (2) break between sections
bp_match = bp_row.match(line)
if not bp_match:
split_idx = 0
else:
if split_idx in split_reps:
split_reps[split_idx] += bp_match.group(1)
else:
split_reps[split_idx] = bp_match.group(1)
split_idx += 1
return tree_count, bipartition_counts, bipartition_freqs
##############################################################################
## Support
def _execute_command_sequence(self):
returncode, stdout, stderr = PaupService.call(self.commands)
self.commands = []
stdout = stdout.split("\n")
stderr = stderr.split("\n")
return returncode, stdout, stderr
##############################################################################
## Wrappers for PAUP* Services
def call(*args, **kwargs):
return PaupService.call(*args, **kwargs)
def symmetric_difference(tree1, tree2):
if tree1.taxon_namespace is not tree2.taxon_namespace:
trees = dendropy.TreeList([dendropy.Tree(tree1), dendropy.Tree(tree2)])
else:
trees = dendropy.TreeList([tree1, tree2], taxon_namespace=tree1.taxon_namespace)
tf = tempfile.NamedTemporaryFile("w", delete=True)
trees.write_to_stream(tf, schema='nexus')
tf.flush()
assert tree1.is_rooted == tree2.is_rooted
sd = get_split_distribution(
tree_filepaths=[tf.name],
taxa_filepath=tf.name,
is_rooted=tree1.is_rooted,
use_tree_weights=True,
burnin=0)
sf = sd.split_frequencies
conflicts = 0
for k, v in sf.items():
if v < 1.0:
conflicts += 1
return conflicts
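# A minimal, hypothetical sketch of comparing two trees with
# ``symmetric_difference``; the newick strings are illustrative only and
# PAUP* must be installed for the call to actually run.
def _example_symmetric_difference():
    taxa = dendropy.TaxonNamespace()
    tree1 = dendropy.Tree.get(data="((A,B),(C,D));", schema="newick",
                              taxon_namespace=taxa)
    tree2 = dendropy.Tree.get(data="((A,C),(B,D));", schema="newick",
                              taxon_namespace=taxa)
    return symmetric_difference(tree1, tree2)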
def pscore_trees(
trees,
char_matrix,
pset_option_list=None,
pscore_option_list=None,
paup_path=PAUP_PATH):
if pset_option_list is not None:
pset = "pset " + " ".join(pset_option_list)
else:
pset = ""
scorefile = tempfile.NamedTemporaryFile("w+", delete=True)
pscore_command = "pscore / scorefile={}".format(scorefile.name)
    if pscore_option_list is not None:
        pscore_command = pscore_command + " " + " ".join(pscore_option_list)
post_est_commands = """\
set crit=parsimony;
{pset}
{pscore_command}
""".format(pset=pset, pscore_command=pscore_command)
paup_block = """\
set warnreset=no;
exe '{data_file}';
gettrees file= '{intree_file}' warntree=no;
{post_est_commands};
"""
cf = tempfile.NamedTemporaryFile("w", delete=True)
char_matrix.write_to_stream(cf, schema='nexus')
cf.flush()
input_tree_file_handle = tempfile.NamedTemporaryFile("w", delete=True)
input_tree_filepath = input_tree_file_handle.name
trees.write_to_stream(input_tree_file_handle, schema="nexus")
input_tree_file_handle.flush()
paup_args = {}
paup_args["data_file"] = cf.name
paup_args["intree_file"] = input_tree_filepath
paup_args["post_est_commands"] = post_est_commands
paup_block = paup_block.format(**paup_args)
paup_run = subprocess.Popen(['%s -n' % paup_path],
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = processio.communicate(paup_run, paup_block)
if stderr:
sys.stderr.write("\n*** ERROR FROM PAUP ***")
sys.stderr.write(stderr)
sys.exit(1)
scores_str = open(scorefile.name, "r").read()
score_rows = [r for r in scores_str.split("\n")[1:] if r != ""]
assert len(score_rows) == len(trees)
scores = [int(s.split()[1]) for s in score_rows]
assert len(scores) == len(trees)
cf.close()
input_tree_file_handle.close()
scorefile.close()
return scores
def estimate_ultrametric_tree(
char_matrix,
topology_tree=None,
paup_path=PAUP_PATH):
post_est_commands = """\
set crit=likelihood;
root rootmethod=midpoint;
lset userbr=no nst = 1 basefreq = eq rates = eq clock =yes;
lscore;
"""
if topology_tree is None:
ultrametric_tree = estimate_tree(char_matrix,
tree_est_criterion="nj",
num_subst=2,
unequal_base_freqs=False,
gamma_rates=False,
prop_invar=False,
extra_post_est_commands=post_est_commands)
return ultrametric_tree
else:
paup_block = """\
set warnreset=no;
exe '%(data_file)s';
gettrees file= '%(intree_file)s' warntree=no;
%(post_est_commands)s;
savetrees file=%(outtree_file)s format=nexus root=yes brlens=yes taxablk=yes maxdecimals=20;
"""
cf = tempfile.NamedTemporaryFile("w", delete=True)
char_matrix.write_to_stream(cf, schema='nexus')
cf.flush()
input_tree_file_handle = tempfile.NamedTemporaryFile("w", delete=True)
input_tree_filepath = input_tree_file_handle.name
topology_tree.write_to_stream(input_tree_file_handle, schema="nexus")
input_tree_file_handle.flush()
# output_tree_file_handle, output_tree_filepath = tempfile.mkstemp(text=True)
output_tree_file_handle = tempfile.NamedTemporaryFile("w+", delete=True)
output_tree_filepath = output_tree_file_handle.name
paup_args = {}
paup_args["data_file"] = cf.name
paup_args["intree_file"] = input_tree_filepath
paup_args["post_est_commands"] = post_est_commands
paup_args["outtree_file"] = output_tree_filepath
paup_block = paup_block % paup_args
paup_run = subprocess.Popen([paup_path, "-n"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = processio.communicate(paup_run, paup_block)
t = dendropy.Tree.get_from_path(output_tree_filepath, "nexus", taxon_namespace=char_matrix.taxon_namespace)
cf.close()
input_tree_file_handle.close()
output_tree_file_handle.close()
return t
def estimate_tree(char_matrix,
tree_est_criterion="likelihood",
num_subst=6,
unequal_base_freqs=True,
gamma_rates=True,
prop_invar=True,
extra_pre_est_commands=None,
extra_post_est_commands=None,
paup_path='paup',
char_matrix_writing_kwargs=None,
timeout=None,
):
"""
Given a dataset, ``char_matrix``, estimates a tree using the given criterion.
"""
paup_args = {
'nst': num_subst,
'basefreq' : unequal_base_freqs and 'estimate' or 'equal',
'rates' : gamma_rates and 'gamma' or 'equal',
'pinvar' : prop_invar and 'estimate' or '0',
}
cf = tempfile.NamedTemporaryFile("w", delete=True)
if not char_matrix_writing_kwargs:
char_matrix_writing_kwargs = {}
char_matrix.write_to_stream(cf, schema='nexus', **char_matrix_writing_kwargs)
cf.flush()
paup_args['datafile'] = cf.name
# output_tree_file_handle, output_tree_filepath = tempfile.mkstemp(text=True)
output_tree_file_handle = tempfile.NamedTemporaryFile("w+", delete=True)
output_tree_filepath = output_tree_file_handle.name
paup_args['est_tree_file'] = output_tree_filepath
if extra_pre_est_commands:
if textprocessing.is_str_type(extra_pre_est_commands):
extra_pre_est_commands = [extra_pre_est_commands]
paup_args["pre_est_commands"] = ";\n".join(extra_pre_est_commands)
else:
paup_args["pre_est_commands"] = ""
if extra_post_est_commands:
if textprocessing.is_str_type(extra_post_est_commands):
extra_post_est_commands = [extra_post_est_commands]
paup_args["post_est_commands"] = ";\n".join(extra_post_est_commands)
else:
paup_args["post_est_commands"] = ""
paup_template = """\
set warnreset=no;
exe %(datafile)s;
"""
if tree_est_criterion.startswith("like"):
paup_template += """\
lset tratio=estimate rmatrix=estimate nst=%(nst)s basefreq=%(basefreq)s rates=%(rates)s shape=estimate pinvar=%(pinvar)s ;
"""
if tree_est_criterion not in ["nj", "upgma"] :
paup_template += """\
set crit=%s;
""" % tree_est_criterion
paup_template += """\
%(pre_est_commands)s;
"""
if tree_est_criterion in ["nj", "upgma"] :
paup_template += tree_est_criterion + ";"
else:
paup_template += "hsearch;"
paup_template += """\
%(post_est_commands)s;
savetrees file=%(est_tree_file)s format=nexus brlens=yes taxablk=yes maxdecimals=20;
"""
# paup_run = subprocess.Popen(['%s -n' % paup_path],
# shell=True,
# stdin=subprocess.PIPE,
# stdout=subprocess.PIPE)
# stdout, stderr = processio.communicate(paup_run, paup_template % paup_args)
returncode, stdout, stderr = PaupService.call(
paup_commands=paup_template % paup_args,
paup_path=paup_path,
timeout=timeout,
)
t = dendropy.Tree.get_from_path(output_tree_filepath, "nexus", taxon_namespace=char_matrix.taxon_namespace)
cf.close()
output_tree_file_handle.close()
return t
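# A minimal, hypothetical sketch of estimating a neighbour-joining tree with
# ``estimate_tree``; "chars.nex" is an assumed NEXUS alignment and PAUP* must
# be available at ``paup_path`` for the command to run.
def _example_estimate_tree():
    char_matrix = dendropy.DnaCharacterMatrix.get(path="chars.nex",
                                                  schema="nexus")
    return estimate_tree(char_matrix,
                         tree_est_criterion="nj",
                         num_subst=2,
                         unequal_base_freqs=False,
                         gamma_rates=False,
                         prop_invar=False)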
def estimate_model(char_matrix,
tree_model=None,
num_subst=6,
unequal_base_freqs=True,
gamma_rates=True,
prop_invar=True,
tree_est_criterion="likelihood",
tree_user_brlens=True,
paup_path='paup'):
"""
Given a dataset, ``char_matrix``, uses client-supplied tree or estimates a
tree, and character substitution model for the data.
Returns a tuple, consisting of a trees block with the tree(s) used for the
estimated character model, and a dictionary with estimates of rates, kappa,
base_frequencies, alpha, prop_invar, etc. as well as likelihood.
"""
paup_args = {
'nst': num_subst,
'basefreq' : unequal_base_freqs and 'estimate' or 'equal',
'rates' : gamma_rates and 'gamma' or 'equal',
'pinvar' : prop_invar and 'estimate' or '0',
}
if tree_model is not None:
assert tree_model.taxon_namespace is char_matrix.taxon_namespace
tf = tempfile.NamedTemporaryFile("w", delete=True)
tree_model.write_to_stream(tf, 'nexus')
tf.flush()
paup_args['tree'] = "gettrees file=%s storebrlens=yes;" % tf.name
else:
if tree_est_criterion in ["nj", "upgma"] :
paup_args['tree'] = tree_est_criterion
else:
paup_args['tree'] = "set crit=%s; hsearch; set crit=like;" % tree_est_criterion
if tree_user_brlens:
paup_args['userbrlens'] = 'yes'
else:
paup_args['userbrlens'] = 'no'
cf = tempfile.NamedTemporaryFile("w", delete=True)
char_matrix.write_to_stream(cf, schema='nexus')
cf.flush()
paup_args['datafile'] = cf.name
# output_tree_file_handle, output_tree_filepath = tempfile.mkstemp(text=True)
output_tree_file_handle = tempfile.NamedTemporaryFile("w+", delete=True)
output_tree_filepath = output_tree_file_handle.name
paup_args['est_tree_file'] = output_tree_filepath
paup_template = """\
set warnreset=no;
exe %(datafile)s;
set crit=like;
lset tratio=estimate rmatrix=estimate nst=%(nst)s basefreq=%(basefreq)s rates=%(rates)s shape=estimate pinvar=%(pinvar)s userbrlens=%(userbrlens)s;
%(tree)s;
lscore 1 / userbrlens=%(userbrlens)s;
savetrees file=%(est_tree_file)s format=nexus root=yes brlens=yes taxablk=yes maxdecimals=20;
"""
paup_run = subprocess.Popen(['%s -n' % paup_path],
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = processio.communicate(paup_run, paup_template % paup_args)
results = {}
patterns = {
'likelihood' : re.compile(r'-ln L\s+([\d\.]+)'),
'rAC' : re.compile(r' AC\s+([\d\.]+)'),
'rAG' : re.compile(r' AG\s+([\d\.]+)'),
'rAT' : re.compile(r' AT\s+([\d\.]+)'),
'rCG' : re.compile(r' CG\s+([\d\.]+)'),
'rCT' : re.compile(r' CT\s+([\d\.]+)'),
'rGT' : re.compile(r' GT\s+([\d\.]+)'),
'kappa': re.compile(r' kappa\s+([\d\.]+)'),
'prop_invar' : re.compile(r'P_inv\s+([\d\.]+)'),
'alpha' : re.compile(r'Shape\s+([\S]+)'),
'pA' : re.compile(r' A\s+([\d\.]+)'),
'pC' : re.compile(r' C\s+([\d\.]+)'),
'pG' : re.compile(r' G\s+([\d\.]+)'),
'pT' : re.compile(r' T\s+([\d\.]+)'),
}
for value_name in patterns:
results[value_name] = None
for line in stdout.split('\n'):
for value_name in patterns:
m = patterns[value_name].match(line)
if m:
results[value_name] = m.group(1)
    for value_name in list(results.keys()):
if value_name == 'likelihood':
results[value_name] = -1 * float(results[value_name])
results["log_likelihood"] = results[value_name]
elif results[value_name] is not None:
try:
results[value_name] = float(results[value_name])
except:
pass
t = dendropy.Tree.get_from_path(output_tree_filepath, "nexus", taxon_namespace=char_matrix.taxon_namespace)
cf.close()
output_tree_file_handle.close()
return t, results
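# A minimal, hypothetical sketch of ``estimate_model``: it returns the scored
# tree together with a dictionary of substitution-model estimates (rates,
# base frequencies, alpha, prop_invar, likelihood). "chars.nex" is
# illustrative only and PAUP* must be installed for the call to run.
def _example_estimate_model():
    char_matrix = dendropy.DnaCharacterMatrix.get(path="chars.nex",
                                                  schema="nexus")
    tree, params = estimate_model(char_matrix, tree_est_criterion="nj")
    return tree, params["likelihood"]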
def prune_taxa_from_trees(trees, taxa, paup_path='paup'):
"""
Drops Taxon objects given in container ``taxa`` from TreeList ``trees``
"""
tf = tempfile.NamedTemporaryFile("w", delete=True)
trees.write_to_stream(tf, schema='nexus')
tf.flush()
output_tree_file_handle = tempfile.NamedTemporaryFile("w+", delete=True)
output_tree_filepath = output_tree_file_handle.name
tax_idxs = [ str(trees.taxon_namespace.index(t)+1) for t in taxa ]
tax_idxs = " ".join(tax_idxs)
paup_template = """\
set warnreset=no;
exe %s;
gett file=%s storebrlens=yes;
delete %s / prune;
savetrees file=%s format=nexus brlens=user taxablk=yes maxdecimals=20;
""" % (tf.name,
tf.name,
tax_idxs,
output_tree_filepath)
paup_run = subprocess.Popen(['%s -n' % paup_path],
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = processio.communicate(paup_run, paup_template)
t = dendropy.TreeList.get_from_path(output_tree_filepath,
"nexus",
taxon_namespace=trees.taxon_namespace)
output_tree_file_handle.close()
return t
###############################################################################
## PAUP* WRAPPERS
class PaupSession(processio.Session):
"""
Starts a PAUP* session, which remains active until explicitly closed.
Various commands can get executed and results returned.
"""
EOC_FLAG = "@@@END-OF-COMMAND@@@"
FLAG_DETECT = re.compile(r'^\s*%s\s*$' % EOC_FLAG, re.MULTILINE)
EOC_FLAG_STRIP = re.compile(r"^(paup>)*\s*(\[!)*" + EOC_FLAG + r"(\])*\s*$", re.MULTILINE)
# FLAG_DETECT = re.compile(r'[^\[]\s*%s\s*[^\]]' % EOC_FLAG, re.MULTILINE)
def __init__(self, paup_path=None):
processio.Session.__init__(self, join_err_to_out=False)
if paup_path is None:
self.paup_path = PAUP_PATH
else:
self.paup_path = paup_path
self.start([self.paup_path])
def __del__(self):
self.stop()
def stop(self):
if self.process:
try:
self.process.terminate()
except:
pass
self.process = None
def send_command(self, command):
command = command + ";\n"
command = command + "[!" + self.EOC_FLAG + "]\n"
self.process.stdin.write(command)
self.process.stdin.flush()
stdout_block = ""
while True:
stdout = self._stdout_reader.read()
if stdout is not None:
stdout_block = stdout_block + stdout
if self.FLAG_DETECT.search(stdout_block):
stdout_block = self.EOC_FLAG_STRIP.sub("", stdout_block)
break
# else:
# print stdout_block
stderr_block = ""
while True:
stderr = self._stderr_reader.read()
if stderr is not None:
stderr_block += stderr
else:
break
return stdout_block, stderr_block
def execute_file(self, filepath):
return self.send_command("set warnreset=no; execute %s;\n" % filepath)
def read_data(self, data):
"""
Writes ``data`` as NEXUS-formatted file and
executes file within processio.
"""
cf = tempfile.NamedTemporaryFile("w", delete=True)
data.write_to_stream(cf, "nexus")
cf.flush()
stdout, stderr = self.execute_file(cf.name)
return stdout, stderr | PypiClean |
/CRLFsuite-2.5.2.tar.gz/CRLFsuite-2.5.2/crlfsuite/plugins/wafDetector.py |
import re
import json
import requests
import warnings
from urllib.parse import urlparse
from crlfsuite.utils.compatible import compatible_path
warnings.filterwarnings('ignore')
crlfsuite_dir = compatible_path(__file__.replace(compatible_path('/crlfsuite/plugins/wafDetector.py'), ''))
def WafDetector(url):
"""
    Attempts to identify the WAF protecting the target URL by sending a
    suspicious request and matching the response against the signatures
    in wafsignatures.json
"""
try:
domain, scheme, path = urlparse(url).netloc, urlparse(url).scheme, urlparse(url).path
url = (scheme+'://'+domain+path)
if not url.endswith('/'):
url = url+'/'
signature_file = crlfsuite_dir+'/crlfsuite/db/wafsignatures.json'
signature_file = compatible_path(signature_file)
with open(signature_file, 'r', encoding='utf-8') as file:
wafsigns = json.load(file)
payload = "../../../etc/passwd"
response = requests.get(url+payload)
code = str(response.status_code)
page = response.text
headers = str(response.headers)
cookie = str(response.cookies.get_dict())
if int(code) >= 400:
bmatch = [0, None]
for wafname, wafsign in wafsigns.items():
total_score = 0
pSign = wafsign["page"]
cSign = wafsign["code"]
hSign = wafsign["headers"]
ckSign = wafsign["cookie"]
if pSign:
if re.search(pSign, page, re.I):
total_score += 1
if cSign:
if re.search(cSign, code, re.I):
total_score += 0.5
if hSign:
if re.search(hSign, headers, re.I):
total_score += 1
if ckSign:
if re.search(ckSign, cookie, re.I):
total_score += 1
if total_score > bmatch[0]:
del bmatch[:]
bmatch.extend([total_score, wafname])
if bmatch[0] != 0:
return bmatch[1]
            else:
                return None
        else:
            return None
except Exception as e:
pass | PypiClean |
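# A minimal, hypothetical usage sketch; the target URL is a placeholder and
# the call performs a real HTTP request when executed.
def _example_waf_detection():
    waf = WafDetector("https://example.com/")
    if waf:
        print("WAF detected:", waf)
    else:
        print("No WAF signature matched")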
/BlueWhale3-BlueWhale-0.0.54.tar.gz/BlueWhale3-BlueWhale-0.0.54/orangecontrib/blue_whale/widgets/utils/service_window.py | from AnyQt.QtWidgets import QLineEdit, QGridLayout, QPushButton, QMessageBox, QSizePolicy as Policy, QLabel, QCheckBox
from AnyQt.QtCore import Qt, QSettings
from Orange.widgets.widget import OWWidget
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets import gui
from Orange.widgets.settings import Setting
from orangecontrib.blue_whale.widgets.utils.config import get_url, DEFAULT_URL
from orangecontrib.blue_whale.canvasmain import login
from orangecontrib.blue_whale.i18n_config import *
def __(key):
return i18n.t("bluewhale.service_window." + key)
def set_style(button, param=None):
if param == 'ok':
button.setStyleSheet(
"QPushButton{color:black}"
"QPushButton{height:30px}"
"QPushButton{line-height:30px}"
"QPushButton{border-radius:4px}"
"QPushButton{font-size:14px}"
"QPushButton{margin-top:0px}"
"QPushButton{background:#1890ff}"
"QPushButton:hover{background:#00a9fd}"
"QPushButton{color:#fff}"
)
else:
button.setStyleSheet(
"QPushButton{color:black}"
"QPushButton{height:30px}"
"QPushButton{line-height:30px}"
"QPushButton{border-radius:4px}"
"QPushButton{font-size:14px}"
"QPushButton{margin-top:0px}"
"QPushButton{background:#ccc}"
"QPushButton:hover{background:#e9e9e9}"
)
class ServiceWindow(OWWidget):
name = __("name")
want_basic_layout = False
want_main_area = True
want_control_area = False
auto_commit = Setting(True)
def __init__(self, *args, **kwargs):
super().__init__(self, *args, **kwargs)
self.__mainLayout = True
self.__feedbackUrl = None
self.__feedbackLabel = None
self.setStyleSheet(
"QLineEdit{background-color:rgb(255,255,255) !important}"
'QLineEdit{padding:3px 2px}'
'QLineEdit{border-radius: 4px}'
'QLineEdit{border: 1px solid #e9e9e9}'
'QLineEdit{font-size: 14px}'
'QLabel{font-size:14px}'
'QLabel{color: #000 !important}'
'QCheckBox{font-size:14px}'
)
layout = QGridLayout()
self.serverCheck = QCheckBox(
__("check_btn"), self,
toolTip=__("check_btn_tip")
)
self.serverCheck.stateChanged.connect(self.clickBox)
layout.layout().addWidget(self.serverCheck, 0, 0, 1, 2)
self.serverCheck.setSizePolicy(Policy.MinimumExpanding, Policy.MinimumExpanding)
hbox = gui.hBox(self)
self.tip_text = QLabel(__("input_address"))
hbox.layout().addWidget(self.tip_text)
self.serverLineEdit = QLineEdit()
# self.serverLineEdit.setAlignment(Qt.AlignLeft)
self.serverLineEdit.setPlaceholderText('http or https://')
hbox.layout().addWidget(self.serverLineEdit)
layout.addWidget(hbox, 1, 0, 1, 2)
self.okBtn = QPushButton(__("save"))
set_style(self.okBtn, 'ok')
# okBtn.setFixedSize(100, 30)
self.cancelBtn = QPushButton(__("cancel"))
set_style(self.cancelBtn)
# cancelBtn.setFixedSize(100,30)
layout.layout().addWidget(self.okBtn, 2, 0, 1, 1)
layout.layout().addWidget(self.cancelBtn, 2, 1, 1, 1)
if self.status():
self.serverCheck.setCheckState(False)
self.serverLineEdit.setEnabled(False)
set_style(self.okBtn)
else:
self.serverCheck.setCheckState(2)
self.serverLineEdit.setText(get_url())
set_style(self.okBtn, 'ok')
self.okBtn.clicked.connect(self.accept)
self.cancelBtn.clicked.connect(self.reject)
self.setLayout(layout)
self.setWindowTitle(__("login_set"))
self.setFixedSize(465, 140)
def status(self):
if get_url() == DEFAULT_URL:
return True
else:
return False
def save_url(self, url):
settings = QSettings()
settings.setValue('account/service', url)
login(way=1)
def clickBox(self, state):
if state == Qt.Checked:
self.serverLineEdit.setEnabled(True)
self.serverLineEdit.setFocus()
set_style(self.okBtn, 'ok')
if not self.status():
self.serverLineEdit.setText(get_url())
else:
if self.status():
self.serverLineEdit.setText('')
self.serverLineEdit.setEnabled(False)
set_style(self.okBtn)
else:
net = QMessageBox(
parent=self, windowTitle=self.tr(__("tip")),
icon=QMessageBox.Question,
standardButtons=QMessageBox.Yes | QMessageBox.Cancel,
text=self.tr(__("default_address"))
)
net.button(QMessageBox.Yes).setText(__("ok"))
net.button(QMessageBox.Cancel).setText(__("cancel"))
status = net.exec()
if status == QMessageBox.Yes:
self.save_url(DEFAULT_URL)
net = QMessageBox(
self, windowTitle=self.tr(__("tip")),
icon=QMessageBox.Information,
standardButtons=QMessageBox.Yes,
text=self.tr(__("change_address")),
)
net.button(QMessageBox.Yes).setText(__("ok"))
net.exec_()
self.close()
elif status == QMessageBox.Cancel:
self.serverCheck.setCheckState(2)
def show_error(self):
net = QMessageBox(
self, windowTitle=self.tr(__('ok')),
icon=QMessageBox.Question,
standardButtons=QMessageBox.Yes,
text=self.tr(__("error_address")),
)
net.button(QMessageBox.Yes).setText(__("reenter"))
self.serverLineEdit.setFocus()
net.show()
def accept(self):
if self.serverCheck.isChecked():
url = self.serverLineEdit.text().strip()
try:
import requests
response = requests.get(url + '/api/open/client_id', timeout=7)
if response.status_code == 200 and response.text == 'onion-ring-service':
self.save_url(url)
net = QMessageBox(
self, windowTitle=self.tr(__("tip")),
icon=QMessageBox.Information,
standardButtons=QMessageBox.Yes,
text=self.tr(__("change_success")),
)
net.button(QMessageBox.Yes).setText(__("ok"))
self.serverLineEdit.setFocus()
net.exec_()
self.close()
else:
self.show_error()
except Exception as e:
self.show_error()
if __name__ == "__main__":
WidgetPreview(ServiceWindow).run()
# app = QApplication(sys.argv)
# mainWin = ExampleWindow()
# mainWin.show()
# sys.exit(app.exec_()) | PypiClean |
/Fyreside-0.0.4-py3-none-any.whl/fyreside/__init__.py | from inspect import getmembers, isfunction, isclass
import qtmud
import qtmud.subscriptions
from fyreside import cards, cmds, services, subscriptions, txt
__version__ = '0.0.4'
connected_players = list()
""" The currently connected players. """
player_hands = dict()
""" All the hands currently held by different players, in the format of
``{ player : [ list, of, cards ] }``"""
DECK = list()
""" built from the classes in :mod:`fyreside.cards` when :func:`load` is
called. """
class Player(qtmud.Client):
def __init__(self, **kwargs):
super(Player, self).__init__(**kwargs)
self.max_hand = 7
self.max_health = 20
self.history = list()
self.hand = list()
self.health = 20
self.mana = 10
self.armor = 0
self.word_count = 0
qtmud.active_services['talker'].tune_channel(client=self,
channel='fyreside')
def search_connected_players_by_name(name, singular=False):
matches = [p for p in connected_players if p.name.lower() == name.lower()]
if singular:
if len(matches) == 1:
return matches[0]
else:
return None
return matches
def search_hand(player, text):
""" Searches player's hands for any cards whose name matches text,
or whose name has one word matching with text if text is one word.
"""
matches = list()
digit = None
if text[-1].isdigit():
digit = text.split(' ')[-1]
text = ' '.join(text.split(' ')[0:-1])
if text == 'card':
matches += player.hand
else:
for card in player.hand:
if text == card.name.lower() or \
(len(text.split(' ')) == 1 and
text == card.name.split(' ')[-1].lower()):
matches.append(card)
if matches and digit:
try:
# TODO better translation from user reference to list position
matches = [matches[int(digit) - 1]]
except IndexError:
raise SyntaxWarning('You have that card, but not that many.')
return matches
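# A minimal, hypothetical sketch of ``search_hand`` using stand-in objects;
# only a ``hand`` attribute holding objects with a ``name`` attribute is
# needed here, so no qtmud client is required.
def _example_search_hand():
    class _Card(object):
        def __init__(self, name):
            self.name = name

    class _Player(object):
        def __init__(self, hand):
            self.hand = hand

    player = _Player([_Card("Fire Bolt"), _Card("Ice Bolt")])
    # Matches on the last word of each card name; the trailing digit picks
    # the second match, so this returns the "Ice Bolt" card in a list.
    return search_hand(player, "bolt 2")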
def load():
""" Adds Fyreside :mod:`subscriptions <fyreside.subscriptions>` to
:attr:`qtmud.active_subscribers` and builds :attr:`DECK` from the classes
in :mod:`fyreside.cards`.
"""
global DECK
qtmud.log.info('load()ing Fyreside')
qtmud.log.info('adding fyreside.subscriptions to qtmud.subscribers')
for s in getmembers(subscriptions):
if isfunction(s[1]):
if not s[1].__name__ in qtmud.subscribers:
qtmud.subscribers[s[1].__name__] = list()
qtmud.subscribers[s[1].__name__].append(s[1])
qtmud.active_services['talker'].new_channel('fyreside')
for card in [c[1]() for c in getmembers(cards) if isclass(c[1])]:
for _ in range(card.rarity):
DECK.append(card.__class__())
    qtmud.log.info('Built the Fyreside deck - {} cards total.'
''.format(len(DECK)))
return True
def start():
return True | PypiClean |
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/html/hn_module_tutorial.ipynb | # Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
col_names = ['Freq', 'T', 'Eps1', 'Eps2']
#f = input(str("Enter the filename:"))
df = pd.read_csv(f, sep=r"\s+",index_col=False,usecols = [0,1,2,3],names=col_names,header=None,skiprows=4,encoding='unicode_escape',engine='python')
col1 = ['log f']
for start in range(0, len(df), 63):
name = df['T'][start]
#print(name)
col1.append(name)
df2 = pd.DataFrame()
f1 = df['Freq'][0:63].values
x1 = np.log10((f1))
e = pd.DataFrame(x1)
df2['log f'] = pd.concat([e],axis=1,ignore_index=True)
global Cooling,Heating
for start in range(0, len(df), 63):
f = df['Eps2'][start:start+63].values
ep = np.log10(f)
d = pd.DataFrame(ep)
df2[start] = pd.concat([d],axis=1,ignore_index=True)
df2.columns = col1
'''
a = int(len(col1)/3)
b = 2*a
c = int(len(col1)) - b
Heating1 = df2.iloc[8:,0:a+1]
Cooling = df2.iloc[8:,a+1:b+1]
Heating2 = df2.iloc[8:,b+1:]
heat1_col = col1[0:a+1]
cool_col = col1[a+1:b+1]
heat2_col = col1[b+1:]
Cooling.columns = cool_col
Heating1.columns = heat1_col
Heating2.columns = heat2_col
f2 = df['Freq'][8:59].values
x2 = np.log10((f2))
Cooling['Freq'] = x2
Heating1['Freq'] = x2
Heating2['Freq'] = x2
'''
Cooling = df2.iloc[:,0:25]
Heating = df2.iloc[:,25:]
return df,df2,Cooling,Heating #Heating2
df,df2,cool,heat = create_dataframe('EPS.TXT')
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavgNegpy'''
hn = dd.HN()
''' select range to perform hn fitting'''
''' the select_range function pops up a separate window and allows you two clicks to select the region of interest (ROI)'''
''' In this tutorial, I'll plot the ROI and append as an image in the next cell'''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using the dump_parameters method (varies for each fit function), which writes the parameters to a json file'''
''' this is required before performing the first fitting as it takes the initial guess from the json file created'''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
```
''' perform least squares fitting'''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using a single HN function.
The procedure is similar for double HN and HN with conductivity.

```
'''create a file to save fit results using the create_analysis_file method'''
''' before saving fit results an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using save_fit method of the corresponding fit function'''
''' takes one argument, read more in the documentation'''
hn.save_fit_hn(1)
```
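As a quick recap, a minimal end-to-end run of the single-HN workflow shown above might look like the sketch below; the data, the ROI clicks and the argument to `save_fit_hn` follow the cells earlier in this tutorial and are illustrative only.
```
''' minimal sketch of the single HN workflow shown above '''
hn = dd.HN()
x1, y1 = hn.select_range(x, y)   # two clicks to pick the ROI
hn.dump_parameters_hn()          # dump the initial guess to a json file
hn.initial_view_hn(x1, y1)       # inspect the initial guess
hn.fit(x1, y1)                   # perform the least squares fit
hn.create_analysis_file()        # create the analysis file
hn.save_fit_hn(1)                # save the fit results
```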
| PypiClean |
/ESMValCore-2.9.0rc1.tar.gz/ESMValCore-2.9.0rc1/esmvalcore/preprocessor/_volume.py | import logging
import dask.array as da
import iris
import numpy as np
from ._shared import get_iris_analysis_operation, operator_accept_weights
from ._supplementary_vars import register_supplementaries
logger = logging.getLogger(__name__)
def extract_volume(
cube,
z_min,
z_max,
interval_bounds='open',
nearest_value=False
):
"""Subset a cube based on a range of values in the z-coordinate.
Function that subsets a cube on a box of (z_min, z_max),
(z_min, z_max], [z_min, z_max) or [z_min, z_max]
Note that this requires the requested z-coordinate range to be the
same sign as the iris cube. ie, if the cube has z-coordinate as
negative, then z_min and z_max need to be negative numbers.
If nearest_value is set to `False`, the extraction will be
performed with the given z_min and z_max values.
If nearest_value is set to `True`, the cube extraction will be
performed taking into account the z_coord values that are closest
to the z_min and z_max values.
Parameters
----------
cube: iris.cube.Cube
input cube.
z_min: float
minimum depth to extract.
z_max: float
maximum depth to extract.
interval_bounds: str
sets left bound of the interval to either 'open', 'closed',
'left_closed' or 'right_closed'.
nearest_value: bool
extracts considering the nearest value of z-coord to z_min and z_max.
Returns
-------
iris.cube.Cube
z-coord extracted cube.
"""
if z_min > z_max:
# minimum is below maximum, so switch them around
zmax = float(z_min)
zmin = float(z_max)
else:
zmax = float(z_max)
zmin = float(z_min)
z_coord = cube.coord(axis='Z')
if nearest_value:
min_index = np.argmin(np.abs(z_coord.core_points() - zmin))
max_index = np.argmin(np.abs(z_coord.core_points() - zmax))
zmin = z_coord.core_points()[min_index]
zmax = z_coord.core_points()[max_index]
if interval_bounds == 'open':
coord_values = {z_coord: lambda cell: zmin < cell.point < zmax}
elif interval_bounds == 'closed':
coord_values = {z_coord: lambda cell: zmin <= cell.point <= zmax}
elif interval_bounds == 'left_closed':
coord_values = {z_coord: lambda cell: zmin <= cell.point < zmax}
elif interval_bounds == 'right_closed':
coord_values = {z_coord: lambda cell: zmin < cell.point <= zmax}
else:
raise ValueError(
'Depth extraction bounds can be set to "open", "closed", '
f'"left_closed", or "right_closed". Got "{interval_bounds}".')
z_constraint = iris.Constraint(coord_values=coord_values)
return cube.extract(z_constraint)
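# A minimal, hypothetical sketch of how ``extract_volume`` might be called on
# a cube with a depth coordinate in metres; ``thetao_cube`` is assumed to
# exist and is not defined here.
def _example_extract_volume(thetao_cube):
    # Keep the upper 700 m, including both endpoints.
    upper = extract_volume(thetao_cube, z_min=0., z_max=700.,
                           interval_bounds='closed')
    # Snap the requested limits to the nearest model levels instead.
    nearest = extract_volume(thetao_cube, 0., 700., nearest_value=True)
    return upper, nearest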
def calculate_volume(cube):
"""Calculate volume from a cube.
This function is used when the volume ancillary variables can't be found.
Parameters
----------
cube: iris.cube.Cube
input cube.
Returns
-------
float
grid volume.
"""
# ####
# Load depth field and figure out which dim is which.
depth = cube.coord(axis='z')
z_dim = cube.coord_dims(cube.coord(axis='z'))[0]
# ####
# Load z direction thickness
thickness = depth.bounds[..., 1] - depth.bounds[..., 0]
# ####
# Calculate grid volume:
area = da.array(iris.analysis.cartography.area_weights(cube))
if thickness.ndim == 1 and z_dim == 1:
grid_volume = area * thickness[None, :, None, None]
if thickness.ndim == 4 and z_dim == 1:
grid_volume = area * thickness[:, :]
return grid_volume
@register_supplementaries(
variables=['volcello'],
required='prefer_at_least_one',
)
def volume_statistics(cube, operator):
"""Apply a statistical operation over a volume.
The volume average is weighted according to the cell volume.
Parameters
----------
cube: iris.cube.Cube
Input cube. The input cube should have a
:class:`iris.coords.CellMeasure` with standard name ``'ocean_volume'``,
unless it has regular 1D latitude and longitude coordinates so the cell
volumes can be computed by using
:func:`iris.analysis.cartography.area_weights` to compute the cell
areas and multiplying those by the cell thickness, computed from the
bounds of the vertical coordinate.
operator: str
The operation to apply to the cube, options are: 'mean'.
Returns
-------
iris.cube.Cube
collapsed cube.
Raises
------
ValueError
if input cube shape differs from grid volume cube shape.
"""
# TODO: Test sigma coordinates.
# TODO: Add other operations.
if operator != 'mean':
raise ValueError(f'Volume operator {operator} not recognised.')
try:
grid_volume = cube.cell_measure('ocean_volume').core_data()
except iris.exceptions.CellMeasureNotFoundError:
logger.debug('Cell measure "ocean_volume" not found in cube. '
'Check fx_file availability.')
logger.debug('Attempting to calculate grid cell volume...')
grid_volume = calculate_volume(cube)
else:
grid_volume = da.broadcast_to(grid_volume, cube.shape)
if cube.data.shape != grid_volume.shape:
        raise ValueError(
            f"Cube shape ({cube.shape}) doesn't match grid volume shape "
            f"({grid_volume.shape})")
masked_volume = da.ma.masked_where(da.ma.getmaskarray(cube.lazy_data()),
grid_volume)
result = cube.collapsed(
[cube.coord(axis='Z'),
cube.coord(axis='Y'),
cube.coord(axis='X')],
iris.analysis.MEAN,
weights=masked_volume)
return result
def axis_statistics(cube, axis, operator):
"""Perform statistics along a given axis.
Operates over an axis direction. If weights are required,
they are computed using the coordinate bounds.
Arguments
---------
cube: iris.cube.Cube
Input cube.
axis: str
Direction over where to apply the operator. Possible values
are 'x', 'y', 'z', 't'.
operator: str
Statistics to perform. Available operators are:
'mean', 'median', 'std_dev', 'sum', 'variance',
'min', 'max', 'rms'.
Returns
-------
iris.cube.Cube
collapsed cube.
"""
try:
coord = cube.coord(axis=axis)
except iris.exceptions.CoordinateNotFoundError as err:
raise ValueError(f'Axis {axis} not found in cube '
f'{cube.summary(shorten=True)}') from err
coord_dims = cube.coord_dims(coord)
if len(coord_dims) > 1:
raise NotImplementedError('axis_statistics not implemented for '
'multidimensional coordinates.')
operation = get_iris_analysis_operation(operator)
if operator_accept_weights(operator):
coord_dim = coord_dims[0]
expand = list(range(cube.ndim))
expand.remove(coord_dim)
bounds = coord.core_bounds()
weights = np.abs(bounds[..., 1] - bounds[..., 0])
weights = np.expand_dims(weights, expand)
weights = da.broadcast_to(weights, cube.shape)
result = cube.collapsed(coord, operation, weights=weights)
else:
result = cube.collapsed(coord, operation)
return result
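# A minimal, hypothetical sketch of ``axis_statistics``; ``cube`` is assumed
# to carry 1D x and z coordinates with bounds so that weights can be derived
# for the weighted operators.
def _example_axis_statistics(cube):
    zonal_mean = axis_statistics(cube, axis='x', operator='mean')  # weighted
    depth_sum = axis_statistics(cube, axis='z', operator='sum')    # weighted
    column_max = axis_statistics(cube, axis='z', operator='max')   # unweighted
    return zonal_mean, depth_sum, column_max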
def depth_integration(cube):
"""Determine the total sum over the vertical component.
Requires a 3D cube. The z-coordinate
integration is calculated by taking the sum in the z direction of the
cell contents multiplied by the cell thickness.
Arguments
---------
cube: iris.cube.Cube
input cube.
Returns
-------
iris.cube.Cube
collapsed cube.
"""
result = axis_statistics(cube, axis='z', operator='sum')
result.rename('Depth_integrated_' + str(cube.name()))
# result.units = Unit('m') * result.units # This doesn't work:
# TODO: Change units on cube to reflect 2D concentration (not 3D)
# Waiting for news from iris community.
return result
def extract_transect(cube, latitude=None, longitude=None):
"""Extract data along a line of constant latitude or longitude.
Both arguments, latitude and longitude, are treated identically.
Either argument can be a single float, or a pair of floats, or can be
left empty.
The single float indicates the latitude or longitude along which the
transect should be extracted.
A pair of floats indicate the range that the transect should be
extracted along the secondairy axis.
For instance `'extract_transect(cube, longitude=-28)'` will produce a
transect along 28 West.
Also, `'extract_transect(cube, longitude=-28, latitude=[-50, 50])'` will
produce a transect along 28 West between 50 south and 50 North.
This function is not yet implemented for irregular arrays - instead
try the extract_trajectory function, but note that it is currently
very slow. Alternatively, use the regrid preprocessor to regrid along
a regular grid and then extract the transect.
Parameters
----------
cube: iris.cube.Cube
input cube.
latitude: None, float or [float, float], optional
transect latiude or range.
longitude: None, float or [float, float], optional
transect longitude or range.
Returns
-------
iris.cube.Cube
collapsed cube.
Raises
------
ValueError
slice extraction not implemented for irregular grids.
ValueError
latitude and longitude are both floats or lists; not allowed
to slice on both axes at the same time.
"""
# ###
coord_dim2 = False
second_coord_range = False
lats = cube.coord('latitude')
lons = cube.coord('longitude')
if lats.ndim == 2:
raise ValueError(
'extract_transect: Not implemented for irregular arrays!' +
'\nTry regridding the data first.')
if isinstance(latitude, float) and isinstance(longitude, float):
raise ValueError(
"extract_transect: Can't slice along lat and lon at the same time")
if isinstance(latitude, list) and isinstance(longitude, list):
raise ValueError(
"extract_transect: Can't reduce lat and lon at the same time")
for dim_name, dim_cut, coord in zip(['latitude', 'longitude'],
[latitude, longitude], [lats, lons]):
# ####
# Look for the first coordinate.
if isinstance(dim_cut, float):
coord_index = coord.nearest_neighbour_index(dim_cut)
coord_dim = cube.coord_dims(dim_name)[0]
# ####
# Look for the second coordinate.
if isinstance(dim_cut, list):
coord_dim2 = cube.coord_dims(dim_name)[0]
second_coord_range = [
coord.nearest_neighbour_index(dim_cut[0]),
coord.nearest_neighbour_index(dim_cut[1])
]
# ####
# Extracting the line of constant longitude/latitude
slices = [slice(None) for i in cube.shape]
slices[coord_dim] = coord_index
if second_coord_range:
slices[coord_dim2] = slice(second_coord_range[0],
second_coord_range[1])
return cube[tuple(slices)]
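# A minimal, hypothetical sketch of the two calling styles described in the
# ``extract_transect`` docstring; ``tos_cube`` is assumed to be a cube on a
# regular latitude/longitude grid and is not defined here.
def _example_extract_transect(tos_cube):
    # Transect along 28 West over the full latitude range.
    transect = extract_transect(tos_cube, longitude=-28.)
    # The same meridian, restricted to 50 South - 50 North.
    limited = extract_transect(tos_cube, longitude=-28., latitude=[-50., 50.])
    return transect, limited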
def extract_trajectory(cube, latitudes, longitudes, number_points=2):
"""Extract data along a trajectory.
latitudes and longitudes are the pairs of coordinates for two points.
number_points is the number of points between the two points.
This version uses the expensive interpolate method, but it may be
necceasiry for irregular grids.
If only two latitude and longitude coordinates are given,
extract_trajectory will produce a cube will extrapolate along a line
between those two points, and will add `number_points` points between
the two corners.
If more than two points are provided, then
extract_trajectory will produce a cube which has extrapolated the data
of the cube to those points, and `number_points` is not needed.
Parameters
----------
cube: iris.cube.Cube
input cube.
latitudes: list
list of latitude coordinates (floats).
longitudes: list
list of longitude coordinates (floats).
number_points: int
number of points to extrapolate (optional).
Returns
-------
iris.cube.Cube
collapsed cube.
Raises
------
ValueError
if latitude and longitude have different dimensions.
"""
from iris.analysis.trajectory import interpolate
if len(latitudes) != len(longitudes):
raise ValueError(
'Longitude & Latitude coordinates have different lengths')
if len(latitudes) == len(longitudes) == 2:
minlat, maxlat = np.min(latitudes), np.max(latitudes)
minlon, maxlon = np.min(longitudes), np.max(longitudes)
longitudes = np.linspace(minlon, maxlon, num=number_points)
latitudes = np.linspace(minlat, maxlat, num=number_points)
points = [('latitude', latitudes), ('longitude', longitudes)]
interpolated_cube = interpolate(cube, points) # Very slow!
return interpolated_cube | PypiClean |
/Flask-Leancloud-Sms-0.1.tar.gz/Flask-Leancloud-Sms-0.1/flask_leancloud_sms.py | import json
import requests
class Leancloud_Sms(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self,app):
self._app_id = app.config.get('LEANCLOUD_APP_ID', '')
self._app_key = app.config.get('LEANCLOUD_APP_KEY', '')
self._request_sms_code_url = app.config.get('REQUEST_SMS_CODE_URL', 'https://api.leancloud.cn/1.1/requestSmsCode')
self._verify_sms_code_url = app.config.get('VERIFY_SMS_CODE_URL', 'https://api.leancloud.cn/1.1/verifySmsCode/')
self._headers = {
"X-LC-Id": self._app_id,
"X-LC-Key": self._app_key,
"Content-Type": "application/json",
}
def send_message(self,phone,smsType='sms',countryCode='CN',template=None,**argv):
"""
        Send a verification code to the given phone number via a POST
        request to the requestSmsCode API.
        :param phone: phone number
        :param smsType: verification type
        :param countryCode: country code
        :param template: SMS template name
        :param argv: template parameters
        :return: bool
"""
data = {
"mobilePhoneNumber": phone,
"smsType":smsType,
"countryCode":countryCode,
}
        if template is not None:
            data['template'] = template
            data.update(argv)
        # The POST request has three parts, as discussed in the earlier API analysis:
        # REQUEST_SMS_CODE_URL: the request URL
        # data: the request body, which must be encoded as JSON
        # headers: the request headers, carrying the app Id and Key
r = requests.post(self._request_sms_code_url, data=json.dumps(data), headers=self._headers)
        # A response status_code of 200 means the request succeeded;
        # otherwise it failed
if r.status_code == 200:
return True
else:
return False
def verify_sms(self,phone, code):
"""
        Send a POST request to the verifySmsCode API to check the code.
        :param phone: phone number
        :param code: verification code
        :return: bool
"""
        # Build the full URL from the given code and phone parameters
target_url = self._verify_sms_code_url + "%s?mobilePhoneNumber=%s" % (code, phone)
r = requests.post(target_url, headers=self._headers)
        # A response status_code of 200 means verification succeeded;
        # otherwise it failed
if r.status_code == 200:
return True
else:
return False | PypiClean |
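# A minimal, hypothetical sketch of wiring the extension into a Flask app;
# the configuration values and route are placeholders, not real credentials.
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['LEANCLOUD_APP_ID'] = 'your-app-id'
    app.config['LEANCLOUD_APP_KEY'] = 'your-app-key'
    sms = Leancloud_Sms(app)

    @app.route('/send/<phone>')
    def send(phone):
        ok = sms.send_message(phone)
        return 'sent' if ok else 'failed'

    return app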
/MakkaPakka-1.0.4.tar.gz/MakkaPakka-1.0.4/src/makka_pakka/parsing/detect_headings.py | from typing import Tuple
from makka_pakka.exceptions.exceptions import ErrorType
from makka_pakka.exceptions.exceptions import MKPKInvalidParameter
from makka_pakka.exceptions.exceptions import MKPKNameError
from makka_pakka.exceptions.exceptions import MKPKParsingError
class HeadingStyle:
"""Enum of heading style e.g [heading_name] is SINGLE_HEADING, [[name]] is
a DOUBLE_HEADING."""
NO_HEADING = 0
SINGLE_HEADING = 1
DOUBLE_HEADING = 2
class HeadingType:
"""Enum of heading type, which defines the type of code expected below,
and therefore how to parse this data, e.g [[data]] is DATA, [[code]] is
CODE, [[gadget]] is gadget."""
NONE = 0
DATA = 1
CODE = 2
GADGETS = 3
def detect_heading_in_line(line: str) -> Tuple[HeadingStyle, str]:
"""
Determines if a line contains a heading, i.e [{name}] or [[{name}]]
:param line: The line of makka pakka code to detect a heading in.
:return: A tuple of (HeadingStyle, {heading_name}), where heading_name is
"" if the type is HeadingStyle.NO_HEADING.
"""
if not isinstance(line, str):
raise MKPKInvalidParameter("line", "_detect_heading_in_line", line)
NO_HEADING_RETURN = (HeadingStyle.NO_HEADING, "")
# Early breakout if the line can't be a heading.
if "[" not in line or "]" not in line:
return NO_HEADING_RETURN
"""
A double pass algorithm is used to determine the length and location of the
heading. Starting with a forward pass, the index of the first ']' character
is located. Then, in the backwards pass the index of the first '[' is
found. Using the length of the string its possible to get the index of the
start of the heading.
len. of heading = forward pass index - backward pass index
start index = backward pass index
To determine if it is a double heading, check the index before the backward
pass index for a '[' character, and the index after the forward pass index
for a ']' character.
"""
# Forward pass
NO_CHARACTER_FOUND = -1
forward_pass_index: int = NO_CHARACTER_FOUND
for i, char in enumerate(line):
if char == "]":
forward_pass_index = i
break
# Early return if no ']' character found.
if forward_pass_index == NO_CHARACTER_FOUND:
return NO_HEADING_RETURN
# Backward pass
backward_pass_index: int = NO_CHARACTER_FOUND
for i, char in enumerate(line[::-1]):
if char == "[":
# The list has been reversed, so un-reverse the index.
backward_pass_index = (len(line) - i) - 1
break
# Early return if no '[' character found.
if backward_pass_index == NO_CHARACTER_FOUND:
return NO_HEADING_RETURN
# '[', ']' are valid characters in .asm, but only when inlined. Therefore
# we can differenciate by enforcing that headings start at index 0|1.
if backward_pass_index > 1:
return NO_HEADING_RETURN
# Check that the backward pass index is before the forward pass index.
if backward_pass_index >= forward_pass_index:
return NO_HEADING_RETURN
heading_name = line[backward_pass_index + 1 : forward_pass_index]
# Assert that the heading name is valid
try:
_assert_valid_mkpk_name(heading_name, line)
except MKPKNameError:
raise MKPKParsingError(
"Invalid heading name",
f"An invalid heading name '${heading_name}' was encountered in the\
following line:\n >${line}",
ErrorType.FATAL,
)
# Check if it is a double heading.
if 0 <= backward_pass_index - 1 and forward_pass_index + 1 < len(line):
if line[backward_pass_index - 1] == "[" and line[forward_pass_index + 1] == "]":
# TODO: Check that the [[]] heading is in the valid set of heading
# names.
return (
HeadingStyle.DOUBLE_HEADING,
heading_name,
)
# If nothing else, then it must be a single heading.
return (
HeadingStyle.SINGLE_HEADING,
heading_name,
)
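# A minimal sketch of the values returned for the three cases handled above;
# the example lines are illustrative makka pakka source only.
def _example_detect_heading():
    assert detect_heading_in_line("[[code]]") == (
        HeadingStyle.DOUBLE_HEADING, "code")
    assert detect_heading_in_line("[my_func]") == (
        HeadingStyle.SINGLE_HEADING, "my_func")
    assert detect_heading_in_line("mov rax, 60") == (
        HeadingStyle.NO_HEADING, "")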
def _assert_valid_mkpk_name(name: str, line: str = "") -> None:
"""
Asserts that a name used in makka pakka is valid.
:param name: The name to be validated.
:param line: The line that the heading is defined in, for debugging.
:raises:
MKPKNameError - When the heading name is invalid.
"""
if not isinstance(name, str):
raise MKPKInvalidParameter("name", "_assert_valid_mkpk_name", name)
if not isinstance(line, str):
raise MKPKInvalidParameter("line", "_assert_valid_mkpk_name", line)
# Check if the name is valid, i.e in the one of the ranges [a-z][A-Z][0-9]
# [_].
# Define the valid chars using ascii value ranges
valid_chars = (
list(range(0x30, 0x3A))
+ list(range(0x41, 0x5B))
+ list(range(0x61, 0x7B))
+ [0x5F]
)
if not all([ord(char) in valid_chars for char in [*name]]) or len(name) == 0:
raise MKPKNameError(
"Encountered invalid name.",
f"The name assigned on the following line is\
invalid:\n> {line}\n\nValid names only use characters\
in the range [a-z][A-Z][0-9][_]",
ErrorType.FATAL,
) | PypiClean |
/EC2StepShell-1.1.1-py3-none-any.whl/ec2stepshell/core/ReverseShell.py | import os
import re
import traceback
from termcolor import cprint
from time import sleep
from botocore.exceptions import ClientError
from ec2stepshell.ssm.SsmWrapper import SsmWrapper
from ec2stepshell.utils.constants import const, user_config
from ec2stepshell.utils.terminal.TerminalEmulator import TerminalEmulator
class ReverseShell:
def __init__(self, profile, access_key, secret_key, token, region):
self.ssm_wrapper = SsmWrapper(
profile=profile,
access_key=access_key,
secret_key=secret_key,
token=token,
region=region
)
self.queue = {}
self.use_list_command_invocations = False
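    # A minimal, hypothetical usage sketch (the profile name, region and
    # instance id are placeholders):
    #
    #   shell = ReverseShell(profile="target", access_key=None,
    #                        secret_key=None, token=None, region="eu-west-1")
    #   shell.start_shell("i-0123456789abcdef0")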
def start_shell(self, instance_id):
cprint('[x] Starting reverse shell on EC2 instance ' + instance_id, 'blue')
cprint('[x] Determining method for retrieving output', 'blue')
# true - list_command_invocations will be used
# false - get_command_invocation will be used
self.use_list_command_invocations = self.determine_retrieve_method()
self.determine_os(instance_id, user_config['os'])
terminal_emulator = self.get_shell_display(instance_id, user_config['os'])
while True:
command = input(terminal_emulator.shell_display)
if command.strip().startswith('!'):
try:
self.handle_ec2s2_command(command)
except IndexError:
cprint(f'[!] \'{command}\' failed. Check command ID and try again.', 'red')
finally:
continue
if command == '' or command.isspace():
continue
if command.strip() == 'clear' or command.strip() == 'cls':
os.system('cls' if os.name == 'nt' else 'clear')
continue
if command.startswith('cd '):
terminal_emulator.change_directory(pwd=command, strip=True)
print(end='')
continue
if command.strip() == 'exit':
cprint('[x] Exit...', 'blue')
exit()
try:
output = self.send_command_and_get_output(
document=const[user_config['os']]['document'],
command=command,
instance_id=instance_id,
directory=terminal_emulator.pwd,
)
if output is not None:
print(output.output, end='')
except Exception as e:
cprint(f'[!] An unknown local error occurred when running the given command. Help me improve the tool by creating an issue on GitHub with the error message.', 'red')
traceback.print_exc()
def get_shell_display(self, instance_id, os):
cprint('\n[x] Retrieving hostname', 'blue')
try:
hostname_obj = self.send_command_and_get_output(
document=const[os]['document'],
command='hostname',
instance_id=instance_id,
directory='.'
)
if hostname_obj is None:
cprint('[!] Hostname not retrieved. Try running with increased delay.', 'red')
exit()
hostname = hostname_obj.output.replace('\n', '')
cprint('\t Hostname: ' + hostname, 'blue')
except ClientError as ex:
if ex.response['Error']['Code'] == 'AccessDeniedException':
cprint("[!] Permission ssm:SendCommand is denied.", 'red')
cprint("[!] Execution can not continue.", 'red')
cprint("[x] Exit...", 'blue')
exit()
cprint('[x] Retrieving working directory', 'blue')
pwd_obj = self.send_command_and_get_output(
document=const[os]['document'],
command='pwd',
instance_id=instance_id,
directory='.'
)
if pwd_obj is None:
cprint('[!] Working directory not retrieved. Try running with increased delay.', 'red')
exit()
pwd = pwd_obj.output.replace('\n', '')
if os == 'windows':
matches = re.findall(r".:\\.+(?=\r)", pwd_obj.output, re.MULTILINE)
pwd = matches[0]
cprint('\t Working directory: ' + pwd + '\n', 'blue')
terminal_emulator = TerminalEmulator(hostname=hostname, pwd=pwd)
status_in_progress = const['general']['command_statuses']['in_progress']
if hostname_obj.status == status_in_progress or pwd_obj.status == status_in_progress:
cprint('[~] Not all initialization information was retrieved. It is recommended to restart the shell with increased delays or number of retries.', 'yellow')
return terminal_emulator
def send_command_and_get_output(self, document, command, instance_id, directory):
execution_finished = False
output_command = None
command_id = self.ssm_wrapper.launch_command(
instance_id=instance_id,
document_name=document,
command=command,
directory=directory
)
sleep(user_config['base_sleep'])
retries = 0
while execution_finished == False:
if retries >= user_config['max_retries']:
cprint(f'[!] Maximum number of retries reached for command id {command_id}', 'red')
cprint(f'[x] You can manually retry to get the output with \'!retry {command_id}\'', 'blue')
cprint('[x] To view all commands that were not finished: \'!showqueue\'', 'blue')
self.queue[command_id] = command
break
output_command = self.ssm_wrapper.get_execution_output(
command_id=command_id,
use_list_commands=self.use_list_command_invocations,
instance_id=instance_id
)
execution_finished = output_command.is_execution_finished()
            if not execution_finished:
sleep(user_config['retry_sleep'])
retries += 1
continue
return output_command
def handle_ec2s2_command(self, raw_command):
raw_command = raw_command.strip()[1:]
params = raw_command.split(' ')
if len(params) == 0:
cprint('[!] Command can\'t be interpreted', 'red')
return
command = params[0]
if command == 'help':
cprint('[x] !showqueue', 'blue')
cprint('\t Display commands in queue. For these commands the output was not retrieved using the configured number of retries')
cprint('[x] !clearqueue', 'blue')
cprint('\t Clears the commands in queue without retrieving the output.')
cprint('[x] !retry command_id', 'blue')
cprint('\t Retry to get the output of a command. Example: \'!retry 6712a779-2ec2-4514-a946-9ab06861457f\'')
return
if command == 'retry':
if len(params) != 2:
cprint('[!] \'retry\' command needs to have the structure \'!retry command_id\'', 'red')
return
output_command = self.ssm_wrapper.get_execution_output(
command_id=params[1])
            if not output_command.is_execution_finished():
cprint(f'[x] Command execution not finished. Current status: {output_command.status}', 'blue')
return
cprint(f'[x] Command execution finished with status \'{output_command.status}\'', 'blue')
cprint(f'[x] Output:', 'blue')
cprint(output_command.output, 'green')
if params[1] in self.queue.keys():
self.queue.pop(params[1])
return
if command == 'showqueue':
if len(self.queue.keys()) == 0:
cprint('[x] No commands in queue', 'blue')
return
cprint('[x] Commands in queue:', 'blue')
for k, v in self.queue.items():
cprint(f'\t{k}: {v}', 'blue')
cprint(f'[x] Retry to get the output for a command with \'!retry command_id\'', 'blue')
return
if command == 'clearqueue':
self.queue = {}
cprint(f'[x] Queue cleared', 'blue')
return
cprint('[!] Command unknown', 'red')
def determine_os(self, instance_id, os):
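        """Verify (or detect) the instance OS by running 'whoami' over SSM.

        When no OS is specified, Linux is tried first; on a platform mismatch the
        check is repeated with the other OS. The confirmed value is stored in
        user_config['os'].
        """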
if os == '':
cprint('[~] OS not specified. Trying to determine...', 'yellow')
os = 'linux'
try:
output = self.send_command_and_get_output(
document=const[os]['document'],
command='whoami',
instance_id=instance_id,
directory='.'
)
if output is None:
cprint('[!] An error occurred. Increase delay or manually specify OS','red')
exit()
if output.status == const['general']['command_statuses']['failed']:
raise Exception('OS')
if output.status == const['general']['command_statuses']['success']:
cprint(f'[x] Instance\'s OS is {os.upper()}', 'blue')
user_config['os'] = os
else:
cprint('[!] Can\'t determine OS. Try increasing the maximum retries, the retry delay or take a guess.', 'red')
exit()
except Exception as e:
if e.args[0] == 'OS' or e.response['Error']['Code'] == 'UnsupportedPlatformType':
cprint(f'[~] Instance\'s OS is not {os.upper()}', 'yellow')
os = 'linux' if os == 'windows' else 'windows'
self.determine_os(instance_id, os)
return
if e.response['Error']['Code'] == 'InvalidInstanceId':
cprint(f'[!] Instance id is wrong or can\'t communicate with SSM', 'red')
exit()
def determine_retrieve_method(self):
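        """Pick how command output will be retrieved.

        Returns True when ssm:ListCommandInvocations is allowed, False when only
        ssm:GetCommandInvocation is allowed, and exits when neither permission is
        available.
        """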
has_list_permissions = self.ssm_wrapper.has_list_permissions()
if has_list_permissions:
cprint('[x] Permission ssm:ListCommandInvocations granted', 'blue')
cprint('[x] Using ssm:ListCommandInvocations as command output retrieve method', 'blue')
return True
cprint('[~] Permission ssm:ListCommandInvocations denied', 'yellow')
cprint('[~] Checking permission for ssm:GetCommandInvocation', 'yellow')
has_get_permissions = self.ssm_wrapper.has_get_permissions()
if has_get_permissions:
cprint('[x] Permission ssm:GetCommandInvocation granted', 'blue')
cprint('[x] Using ssm:GetCommandInvocation as command output retrieve method', 'blue')
return False
cprint('[~] Permission ssm:GetCommandInvocation denied', 'yellow')
cprint("[!] You don't have permissions to get the output of the command. Try some payloads relying only on ssm:SendCommand as described here: https://securitycafe.ro/2023/04/17/7-lesser-known-aws-ssm-document-techniques-for-code-execution/", 'red')
exit() | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/summernote/lang/summernote-ja-JP.js | (function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})(self, function() {
return /******/ (() => { // webpackBootstrap
var __webpack_exports__ = {};
(function ($) {
$.extend($.summernote.lang, {
'ja-JP': {
font: {
bold: '太字',
italic: '斜体',
underline: '下線',
clear: 'クリア',
height: '文字高',
name: 'フォント',
strikethrough: '取り消し線',
subscript: 'Subscript',
superscript: 'Superscript',
size: '大きさ'
},
image: {
image: '画像',
insert: '画像挿入',
resizeFull: '最大化',
resizeHalf: '1/2',
resizeQuarter: '1/4',
floatLeft: '左寄せ',
floatRight: '右寄せ',
floatNone: '寄せ解除',
shapeRounded: 'Shape: Rounded',
shapeCircle: 'Shape: Circle',
shapeThumbnail: 'Shape: Thumbnail',
shapeNone: 'Shape: None',
dragImageHere: 'ここに画像をドラッグしてください',
dropImage: 'Drop image or Text',
selectFromFiles: '画像ファイルを選ぶ',
maximumFileSize: 'Maximum file size',
maximumFileSizeError: 'Maximum file size exceeded.',
url: 'URLから画像を挿入する',
remove: '画像を削除する',
original: 'Original'
},
video: {
video: '動画',
videoLink: '動画リンク',
insert: '動画挿入',
url: '動画のURL',
providers: '(YouTube, Vimeo, Vine, Instagram, DailyMotion, Youku)'
},
link: {
link: 'リンク',
insert: 'リンク挿入',
unlink: 'リンク解除',
edit: '編集',
textToDisplay: 'リンク文字列',
url: 'URLを入力してください',
openInNewWindow: '新しいウィンドウで開く'
},
table: {
table: 'テーブル',
addRowAbove: '行を上に追加',
addRowBelow: '行を下に追加',
addColLeft: '列を左に追加',
addColRight: '列を右に追加',
delRow: '行を削除',
delCol: '列を削除',
delTable: 'テーブルを削除'
},
hr: {
insert: '水平線の挿入'
},
style: {
style: 'スタイル',
p: '標準',
blockquote: '引用',
pre: 'コード',
h1: '見出し1',
h2: '見出し2',
h3: '見出し3',
h4: '見出し4',
h5: '見出し5',
h6: '見出し6'
},
lists: {
unordered: '通常リスト',
ordered: '番号リスト'
},
options: {
help: 'ヘルプ',
fullscreen: 'フルスクリーン',
codeview: 'コード表示'
},
paragraph: {
paragraph: '文章',
outdent: '字上げ',
indent: '字下げ',
left: '左寄せ',
center: '中央寄せ',
right: '右寄せ',
justify: '均等割付'
},
color: {
recent: '現在の色',
more: 'もっと見る',
background: '背景色',
foreground: '文字色',
transparent: '透明',
setTransparent: '透明にする',
reset: '標準',
resetToDefault: '標準に戻す'
},
shortcut: {
shortcuts: 'ショートカット',
close: '閉じる',
textFormatting: '文字フォーマット',
action: 'アクション',
paragraphFormatting: '文章フォーマット',
documentStyle: 'ドキュメント形式',
extraKeys: 'Extra keys'
},
help: {
'insertParagraph': '改行挿入',
'undo': '一旦、行った操作を戻す',
'redo': '最後のコマンドをやり直す',
'tab': 'Tab',
'untab': 'タブ戻し',
'bold': '太文字',
'italic': '斜体',
'underline': '下線',
'strikethrough': '取り消し線',
'removeFormat': '装飾を戻す',
'justifyLeft': '左寄せ',
'justifyCenter': '真ん中寄せ',
'justifyRight': '右寄せ',
'justifyFull': 'すべてを整列',
'insertUnorderedList': '行頭に●を挿入',
'insertOrderedList': '行頭に番号を挿入',
'outdent': '字下げを戻す(アウトデント)',
'indent': '字下げする(インデント)',
'formatPara': '段落(P tag)指定',
'formatH1': 'H1指定',
'formatH2': 'H2指定',
'formatH3': 'H3指定',
'formatH4': 'H4指定',
'formatH5': 'H5指定',
'formatH6': 'H6指定',
'insertHorizontalRule': '<hr />を挿入',
'linkDialog.show': 'リンク挿入'
},
history: {
undo: '元に戻す',
redo: 'やり直す'
},
specialChar: {
specialChar: 'SPECIAL CHARACTERS',
select: 'Select Special characters'
}
}
});
})(jQuery);
/******/ return __webpack_exports__;
/******/ })()
;
});
//# sourceMappingURL=summernote-ja-JP.js.map | PypiClean |
/MLMath-0.0.0rc1.tar.gz/MLMath-0.0.0rc1/mlm/tf/_compile_cuda.py | import os
from os.path import isfile, isdir, abspath, join, dirname
import subprocess
def compile_cuda_ops(gcc: str = None,
nvcc: str = None,
cuda_lib: str = None):
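    """Compile the custom CUDA TensorFlow ops ('resample' and 'resample_gradient').

    The compilers and CUDA library path default to the compiler TensorFlow was
    built with, '/usr/local/cuda/bin/nvcc' and '/usr/local/cuda/lib64/' when not
    given explicitly; build products and a log file are written alongside the
    sources under cuda/build and cuda/log.txt.
    """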
tf_gcc = check_tf_cuda_compatibility()
if gcc is None:
gcc = tf_gcc if isfile(tf_gcc) else 'gcc'
if nvcc is None:
nvcc = '/usr/local/cuda/bin/nvcc' if isfile('/usr/local/cuda/bin/nvcc') else 'nvcc'
if cuda_lib is None:
cuda_lib = '/usr/local/cuda/lib64/'
mlm_tf_path = abspath(dirname(__file__))
src_path = join(mlm_tf_path, 'cuda', 'src')
build_path = join(mlm_tf_path, 'cuda', 'build')
logfile_path = join(mlm_tf_path, 'cuda', 'log.txt')
print("Source Path:\t" + src_path)
print("Build Path:\t" + build_path)
print("GCC:\t\t" + gcc)
print("NVCC:\t\t" + nvcc)
print("CUDA lib:\t" + cuda_lib)
print("----------------------------")
# Remove old build files
if isdir(build_path):
print('Removing old build files from %s' % build_path)
for file in os.listdir(build_path):
os.remove(join(build_path, file))
else:
print('Creating build directory at %s' % build_path)
os.mkdir(build_path)
print('Compiling CUDA code...')
with open(logfile_path, "w") as logfile:
try:
compile_cuda('resample', nvcc, src_path, build_path, logfile=logfile)
compile_gcc('resample', gcc, src_path, build_path, cuda_lib, logfile=logfile)
compile_cuda('resample_gradient', nvcc, src_path, build_path, logfile=logfile)
compile_gcc('resample_gradient', gcc, src_path, build_path, cuda_lib, logfile=logfile)
# compile_cuda('bicgstab_ilu_linear_solve_op', self.nvcc, src_path, build_path, logfile=logfile)
# compile_gcc('bicgstab_ilu_linear_solve_op', self.gcc, src_path, build_path, self.cuda_lib, logfile=logfile)
except BaseException as err:
print(f"Compilation failed. See {logfile_path} for details.")
raise err
print(f"Compilation complete. See {logfile_path} for details.")
def check_tf_cuda_compatibility():
import tensorflow
build = tensorflow.sysconfig.get_build_info() # is_rocm_build, cuda_compute_capabilities
tf_gcc = build['cpu_compiler']
is_cuda_build = build['is_cuda_build']
print(f"TensorFlow compiler: {tf_gcc}.")
if not is_cuda_build:
raise AssertionError("Your TensorFlow build does not support CUDA.")
else:
cuda_version = build['cuda_version']
cudnn_version = build['cudnn_version']
print(f"TensorFlow was compiled against CUDA {cuda_version} and cuDNN {cudnn_version}.")
return tf_gcc
def compile_cuda(file_names, nvcc, source_dir, target_dir, logfile):
import tensorflow
tf_cflags = tensorflow.sysconfig.get_compile_flags()
command = [
nvcc,
join(source_dir, f'{file_names}.cu.cc'),
'-o', join(target_dir, f'{file_names}.cu.o'),
'-std=c++11',
'-c',
'-D GOOGLE_CUDA=1',
'-x', 'cu',
'-Xcompiler',
'-fPIC',
'--expt-relaxed-constexpr',
'-DNDEBUG',
'-O3'
] + tf_cflags
print(f"nvcc {file_names}")
logfile.writelines(["\n", " ".join(command), "\n"])
subprocess.check_call(command, stdout=logfile, stderr=logfile)
def compile_gcc(file_names, gcc, source_dir, target_dir, cuda_lib, logfile):
import tensorflow
tf_cflags = tensorflow.sysconfig.get_compile_flags()
tf_lflags = tensorflow.sysconfig.get_link_flags()
link_cuda_lib = '-L' + cuda_lib
command = [
gcc,
join(source_dir, f'{file_names}.cc'),
join(target_dir, f'{file_names}.cu.o'),
'-o', join(target_dir, f'{file_names}.so'),
'-std=c++11',
'-shared',
'-fPIC',
'-lcudart',
'-O3',
link_cuda_lib
] + tf_cflags + tf_lflags
print(f"gcc {file_names}")
logfile.writelines(["\n", " ".join(command), "\n"])
subprocess.check_call(command, stdout=logfile, stderr=logfile) | PypiClean |
/LiBai-0.1.1.tar.gz/LiBai-0.1.1/libai/data/data_utils/indexed_dataset.py |
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
import logging
import os
import shutil
import struct
import time
from functools import lru_cache
from itertools import accumulate
import numpy as np
import oneflow as flow
logger = logging.getLogger(__name__)
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ["lazy", "cached", "mmap"]
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return "cached"
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return "mmap"
else:
return None
else:
logger.info(f"Dataset does not exist: {path}")
logger.info(
"Path should be a basename that both .idx and .bin can be "
"appended to get full filenames."
)
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == "mmap":
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
logger.info(f"Dataset does not exist: {path}")
logger.info(
"Path should be a basename that both .idx and .bin can be "
"appended to get full filenames."
)
return None
if impl == "infer":
impl = infer_dataset_impl(path)
if impl == "lazy" and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == "cached" and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == "mmap" and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
logger.info(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == "mmap":
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
    6: np.float32,
7: np.double,
8: np.uint16,
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + ".idx"
def data_file_path(prefix_path):
return prefix_path + ".bin"
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
class IndexedDataset(flow.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b"TNTIDX\x00\x00"
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), "rb") as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = f.read(8)
assert struct.unpack("<Q", version) == (1,)
code, self.element_size = struct.unpack("<QQ", f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack("<QQ", f.read(16))
self.doc_count = struct.unpack("<Q", f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), "rb", buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError("index out of range")
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
return a
elif isinstance(idx, slice):
# Hack just to make this work, can optimizer later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
        np.float32: 4,
np.double: 8,
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), "rb") as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
index.write(b"TNTIDX\x00\x00")
index.write(struct.pack("<Q", 1))
index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack("<Q", len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
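    """Read the file sequentially in 100 MB chunks so its pages are cached before it is memory-mapped."""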
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(flow.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b"MMIDIDX\x00\x00"
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, "wb")
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack("<Q", 1))
self._file.write(struct.pack("<B", code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes, doc_idx):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack("<Q", len(sizes)))
self._file.write(struct.pack("<Q", len(doc_idx)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order="C"))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order="C"))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order="C"))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, "rb") as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
"Index file doesn't match expected format. "
"Make sure that --dataset-impl is configured properly."
)
version = struct.unpack("<Q", stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack("<B", stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack("<Q", stream.read(8))[0]
self._doc_count = struct.unpack("<Q", stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
logger.info("warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
logger.info("reading sizes...")
self._sizes = np.frombuffer(
self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
)
logger.info("reading pointers...")
self._pointers = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes,
)
logger.info("reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
        self._do_init(state, skip_warmup=True)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
logger.info("warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
logger.info("creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode="r", order="C")
logger.info("creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr
)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr
)
return np_array
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, "wb")
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order="C"))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), "rb") as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
def get_indexed_dataset(data_prefix, data_impl, skip_warmup):
logger.info("building dataset index ...")
start_time = time.time()
indexed_dataset = make_dataset(data_prefix, data_impl, skip_warmup)
assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
logger.info(
"inished creating indexed dataset in {:4f} " "seconds".format(time.time() - start_time)
)
logger.info("indexed dataset stats:")
logger.info("number of documents: {}".format(indexed_dataset.doc_idx.shape[0] - 1))
logger.info("number of sentences: {}".format(indexed_dataset.sizes.shape[0]))
return indexed_dataset | PypiClean |
/htpolynet-1.0.7.3.tar.gz/htpolynet-1.0.7.3/docs/source/install.rst | ##############################
Installation and Prerequisites
##############################
Software Prerequisites
----------------------
The following commands should be in your path:
1. ``antechamber``, ``parmchk2``, and ``tleap`` (`AmberTools <https://ambermd.org/GetAmber.php#ambertools>`_, version 22 or higher); preferred installation via ``conda``.
2. ``gmx`` or ``gmx_mpi`` (`Gromacs <https://manual.gromacs.org/documentation/current/index.html>`_, version 2022.1 or higher); preferred installation via compiling from source.
3. ``obabel`` (`OpenBabel <https://github.com/openbabel/openbabel>`_); preferred installation via Linux distribution package.
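
You can check that they are visible on your ``PATH`` (a quick sanity check, assuming a POSIX shell):

.. code-block:: console

   $ which antechamber parmchk2 tleap gmx obabel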
If you use conda/anaconda, we recommend that you create a separate Python environment for running ``HTPolyNet``:
.. code-block:: console
$ conda create --name mol-env python
$ conda activate mol-env
Once this environment is created and activated, you can optionally install some prerequisites:
.. code-block:: console
$ conda install numpy scipy pandas ambertools
(``ambertools`` is available from the ``conda-forge`` channel.) If you don't do this, ``setup.cfg`` in ``HTPolyNet`` will do it for you, which might take a bit.
Installation
------------
The current stable version of ``HTPolyNet`` is available at PyPI:
.. code-block:: console
$ pip install htpolynet
To install a development version of ``HTPolyNet``, you can install it from a freshly cloned GitHub repository:
.. code-block:: console
$ git clone [email protected]:AbramsGroup/HTPolyNet.git
$ cd HTPolyNet
$ pip install -e .
If you created the recommended python environment, make sure it is activated before running ``pip install``!
Notes
-----
If you prefer to use more recent versions of AmberTools, Gromacs, or OpenBabel than your system currently provides, you can compile the latest versions from source. **It is recommended that you deactivate any conda environment before performing any of these compilations.**
Compilation of AmberTools
#########################
Compilation of AmberTools requires ``csh``, ``flex``, and ``bison``:
.. code-block:: console
$ tar jxf AmberTools21.tar.bz2
$ cd amber_src
$ ./configure --no-X11 --skip-python gnu
$ source amber.sh
$ make install
Compilation of Gromacs
######################
You can also compile Gromacs from source if your Linux distribution doesn't include it in its package manager, or if you are working on a large supercomputer. The example below builds Gromacs with CUDA but without MPI (assuming you have CUDA installed):
.. code-block:: console
$ tar xfz gromacs-2022.1.tar.gz
$ cd gromacs-2022.1
$ mkdir build
$ cd build
$ cmake .. -DGMX_BUILD_OWN_FFTW=ON -DREGRESSIONTEST_DOWNLOAD=ON -DGMX_GPU=CUDA -DCMAKE_INSTALL_PREFIX=/usr/local/gromacs
$ make
$ make check
$ sudo make install
Then add the following line to your ``~/.bashrc``:
.. code-block:: console
source /usr/local/gromacs/bin/GMXRC
This should provide access to the ``gmx`` command. If you additionally compiled an MPI version (using ``-DGMX_MPI=on`` in the ``cmake`` command), you will also have access to ``gmx_mpi``; either of these commands can be used by HTPolyNet. Note that Gromacs 2016 and below have a version of ``gmx distance`` that limits the number of distances that can be calculated, so we (always) recommend using the latest Gromacs.
Compilation of ``obabel``
#########################
If your system does not have ``obabel`` installed and your Linux distribution doesn't offer a package for it (or you are not root!), you can compile it from source. Be sure to unpack `Eigen <https://eigen.tuxfamily.org/index.php?title=Main_Page>`_ first so that the ``conformer`` plug-in for ``obabel`` will work. Below I demonstrate a session in which both the Eigen and OpenBabel source packages are downloaded to ``~/Downloads`` and are unpacked in the directory ``~/build/``, and the OpenBabel installation directory is ``~/opt/obabel``.
.. code-block:: console
$ cd ~/build
$ tar jxf ~/Downloads/eigen-3.4.0.tar.bz2 # unpack only -- no need to compile
$ tar jxf ~/Downloads/openbabel-3.1.1.tar.bz2
$ cd openbabel-3.1.1
$ mkdir build
$ cd build
$ cmake .. -DEIGEN3_INCLUDE_DIR=${HOME}/build/eigen-3.4.0/ -DCMAKE_INSTALL_PREFIX=${HOME}/opt/obabel
$ make
$ make test
$ make install
You will need to ensure that ``${HOME}/opt/obabel/bin`` is in your ``PATH``, that ``${HOME}/opt/obabel/lib`` is in your ``LD_LIBRARY_PATH``, and that the environment variable ``BABEL_LIBDIR`` is set to ``${HOME}/opt/obabel/lib``.
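
For example (a sketch assuming the ``${HOME}/opt/obabel`` install prefix used above), these lines could be appended to ``~/.bashrc``:

.. code-block:: console

   export PATH=${HOME}/opt/obabel/bin:$PATH
   export LD_LIBRARY_PATH=${HOME}/opt/obabel/lib:$LD_LIBRARY_PATH
   export BABEL_LIBDIR=${HOME}/opt/obabel/lib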
Other Prerequisites
-------------------
In order to use ``HTPolyNet`` effectively, it is recommended that you have good working knowledge of the following:
1. MD simulation in general and Gromacs specifically;
2. the General Amber Force Field (GAFF), including in particular
a. how to use ``antechamber``, ``tleap``, and ``parmchk2`` to generate GAFF parameterizations; and
b. how to use these parameterizations inside Gromacs; and
3. Polymer chemistry, at least for the systems you are interested in simulating.
| PypiClean |
/170051277_trab_final_gces-0.5.0-py3-none-any.whl/170051277_trab_final_gces/src/data_pipeline/feature_engineering/key_smash.py | from statistics import mean
class KeySmash:
"""A class for calculating metrics to indicate key smashing behavior in a text.
Key smashing is the act of typing on a keyboard in a rapid and uncontrolled manner,
often resulting in a series of random characters being entered into a document or text field.
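
    Example
    -------
    A short instance-level usage sketch of the metrics defined below:

    >>> KeySmash().calculate_char_frequency_metric("ASDASD XXXX")
    3.0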
"""
def __init__(self):
self.char_sets = {
"vowels": 'aeiouáéíóúãõ',
"consonants": 'bcdfghjklmnñpqrstvwxyz',
"special_characters": '!@#$%^¨|\'\"&*()_+:;~`´]}{[}ºª=-.¿¡'
}
def calculate_char_frequency_metric(self, text):
"""
Calculate the Char Frequency Metric.
Parameters
----------
text : str
The text to use for the calculation.
Returns
-------
float
Char Frequency Metric.
Examples
--------
>>> calculate_char_frequency_metric("PUENTECILLA KM. 1.7")
1.121212121212121
>>> calculate_char_frequency_metric("ASDASD XXXX")
3.0
"""
word_results = []
for w in text.split(' '):
char_count = []
if w and len(w) > 0:
for e in set(w):
char_count.append(w.count(e)**2)
word_results.append(sum(char_count)/len(w))
if word_results == 0 or len(word_results) == 0:
return 0
else:
return mean(word_results)
def calculate_irregular_sequence_metric(self, text, opt):
"""
Calculate the Irregular Sequence Metric.
Parameters
----------
text : str
The text to use for the calculation.
opt : str
The type of characters to consider for the calculation,
can be one of 'vowels', 'consonants', or 'special_characters'.
Returns
-------
float
Irregular Sequence Metric.
Examples
--------
>>> calculate_irregular_sequence_metric("PUENTECILLA KM. 1.7", "vowels")
0.21052631578947367
>>> calculate_irregular_sequence_metric("ASDASD XXXX", "consonants")
2.1818181818181817
>>> calculate_irregular_sequence_metric("!@#$% ASDFGHJKL", "special_characters")
1.5625
"""
count_sequence = 1
sequence_regex = []
text = str(text).lower()
opt = self.char_sets[opt]
for i in range(len(text) - 1):
if text[i] in opt and text[i + 1] in opt:
count_sequence = count_sequence + 1
else:
if (count_sequence != 1):
sequence_regex.append(count_sequence**2)
count_sequence = 1
if (count_sequence != 1):
sequence_regex.append(count_sequence**2)
return sum(sequence_regex)/len(text)
def calculate_number_count_metric(self, text):
"""
Calculate the Number Count Metric.
Parameters
----------
text : str
The text field to use for the calculation.
Returns
-------
float
Number Count Metric.
Examples
--------
>>> calculate_number_count_metric("ABC 123 !@#")
0.0
>>> calculate_number_count_metric("ABC123 !@#")
0.9
"""
text_list = text.split()
calc_num_line = 0
if text_list:
for word in text_list:
if any(char.isdigit() for char in word) and any(not char.isdigit() for char in word):
num = len([char for char in word if char.isdigit()])
calc_num = num**2
calc_num_line += calc_num
return calc_num_line / len(' '.join(text_list))
return 0 | PypiClean |
/CubeLang-0.1.4-py3-none-any.whl/libcube/actions.py | import enum
from abc import ABC, abstractmethod
from typing import Union, List, Iterable, TypeVar, Optional, Set, Dict
from .cube import Cube
from .orientation import Orientation, Side, count_occurrences
T = TypeVar("T")
class Action(ABC):
@abstractmethod
def perform(self, cube: Cube, orientation: Orientation) -> Orientation:
pass
class Rotate(Action):
_ROTATION_LETTERS: Dict[Side, str] = {
Side.RIGHT: "X",
Side.TOP: "Y",
Side.FRONT: "Z"
}
def __init__(self, around: Side, twice: bool = False) -> None:
self.axis_side: Side = around
self.twice = twice
def perform_single(self, orientation: Orientation) -> Orientation:
if self.axis_side == Side.FRONT:
return orientation.rotate_clockwise()
elif self.axis_side == Side.BACK:
return orientation.rotate_counterclockwise()
elif self.axis_side == Side.RIGHT:
return orientation.to_bottom
elif self.axis_side == Side.LEFT:
return orientation.to_top
elif self.axis_side == Side.TOP:
return orientation.to_right
else:
return orientation.to_left
def perform(self, cube: Optional[Cube], orientation: Orientation) -> Orientation:
if self.twice:
return self.perform_single(self.perform_single(orientation))
else:
return self.perform_single(orientation)
def __repr__(self):
return f"Rotate({self.axis_side}, {self.twice})"
def __str__(self):
if self.axis_side in Rotate._ROTATION_LETTERS:
letter = Rotate._ROTATION_LETTERS[self.axis_side]
else:
letter = Rotate._ROTATION_LETTERS[self.axis_side.opposite()]
if not self.twice:
letter += "'"
if self.twice:
return letter + "2"
else:
return letter
@staticmethod
def from_turn_steps(steps: Iterable[Side]) -> Iterable["Rotate"]:
for side, turns in count_occurrences(steps):
turns = turns % 4
if turns == 3:
side = side.opposite()
if turns != 0:
yield Rotate(side, turns == 2)
class TurningType(enum.Enum):
HORIZONTAL = enum.auto()
VERTICAL = enum.auto()
SLICE = enum.auto()
class Turn(Action):
TYPES = {
Side.LEFT: TurningType.VERTICAL, Side.RIGHT: TurningType.VERTICAL,
Side.TOP: TurningType.HORIZONTAL, Side.BOTTOM: TurningType.HORIZONTAL,
Side.FRONT: TurningType.SLICE, Side.BACK: TurningType.SLICE
}
def __init__(self, side: Union[Side, TurningType],
indices: Union[int, List[Union[int, type(Ellipsis)]]],
turns: int = 1) -> None:
self.indices: List[Union[int, type(Ellipsis)]] = \
indices if isinstance(indices, list) else [indices]
if not (all(x == Ellipsis or x > 0 for x in self.indices) or
all(x == Ellipsis or x < 0 for x in self.indices)):
raise ValueError("All indices of the turn action must be either "
"positive or negative")
self.turns: int = turns
self.type: TurningType
if isinstance(side, Side):
self.type = Turn.TYPES[side]
if side in {Side.BACK, Side.RIGHT, Side.BOTTOM}:
self.indices = Turn.opposite_side(self.indices)
if side not in {Side.BOTTOM, Side.RIGHT, Side.FRONT}:
self.turns: int = 4 - turns
else:
self.type = side
def perform(self, cube: Cube, orientation: Orientation) -> Orientation:
turning_functions = {
TurningType.VERTICAL: (cube.turn_vertical, cube.get_side(orientation).columns),
TurningType.HORIZONTAL: (cube.turn_horizontal, cube.get_side(orientation).rows),
TurningType.SLICE: (cube.turn_slice, cube.get_side(orientation.to_right).columns)
}
rotate_function, size = turning_functions[self.type]
for side in Turn.normalize_indices(self.indices, size):
rotate_function(orientation, side, self.turns)
return orientation
def __repr__(self):
return f"Turn({self.type}, {self.indices}, {self.turns})"
def __str__(self):
opposite = any(x != Ellipsis and x < 0 for x in self.indices)
if self.type == TurningType.VERTICAL:
letter = "L" if not opposite else "R"
elif self.type == TurningType.HORIZONTAL:
letter = "U" if not opposite else "D"
else:
letter = "F" if not opposite else "B"
if self.turns == 2:
letter += "2"
else:
turns = self.turns
if letter not in {"D", "R", "F"}:
turns = 4 - self.turns
if turns == 3:
letter += "'"
if self.indices != [1] and self.indices != [-1]:
string = []
prev_ellipsis = True
for index in self.indices:
if index == Ellipsis:
string.append(":")
prev_ellipsis = True
else:
if not prev_ellipsis:
string.append(",")
prev_ellipsis = False
string.append(str(abs(index)))
letter += "[" + "".join(string) + "]"
return letter
def _transform(self, turn: Side) -> "Turn":
if Turn.TYPES[turn] == self.type:
return self
if turn == Side.TOP:
if self.type == TurningType.SLICE:
return Turn(TurningType.VERTICAL, self.indices, 4 - self.turns)
else: # TurningType.VERTICAL
return Turn(TurningType.SLICE, Turn.opposite_side(self.indices), self.turns)
elif turn == Side.FRONT:
if self.type == TurningType.VERTICAL:
return Turn(TurningType.HORIZONTAL, self.indices, self.turns)
else: # TurningType.HORIZONTAL
return Turn(TurningType.VERTICAL, Turn.opposite_side(self.indices), 4 - self.turns)
elif turn == Side.RIGHT:
if self.type == TurningType.SLICE:
return Turn(TurningType.HORIZONTAL, self.indices, 4 - self.turns)
else: # TurningType.HORIZONTAL
return Turn(TurningType.SLICE, Turn.opposite_side(self.indices), self.turns)
else:
raise ValueError("Unsupported turn")
def from_orientation(self, orientation: Orientation, origin=Orientation()) -> "Turn":
result: Turn = self
for turn in list(orientation.turns_to(origin)):
result = result._transform(turn)
return result
@staticmethod
def opposite_side(indices: List[Union[int, type(Ellipsis)]]) \
-> List[Union[int, type(Ellipsis)]]:
return [Ellipsis if index == Ellipsis else -index for index in indices]
@staticmethod
def normalize_indices(indices: List[Union[int, type(Ellipsis)]], width: int) -> Set[int]:
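        """Expand a list of layer indices into a concrete set of positive indices.

        Negative indices are counted from the opposite side of a cube of the given
        width, and an Ellipsis between two indices expands to every index strictly
        between them (the cube ends are used when the Ellipsis is first or last).
        """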
def to_positive(idx: Union[int, type(Ellipsis)]):
if idx == Ellipsis:
return ...
elif idx < 0:
return width + idx + 1
else:
return idx
def add_ends(it: Iterable[Union[int, type(Ellipsis)]]):
yield 0
yield from it
yield width + 1
def get_tripples(it: Iterable[Union[int, type(Ellipsis)]]):
v_iter = iter(it)
a = next(v_iter)
b = next(v_iter)
while True:
try:
c = next(v_iter)
yield a, b, c
a, b = b, c
except StopIteration:
break
result = set()
stream = add_ends(map(to_positive, indices))
for prev_value, value, next_value in get_tripples(stream):
if value == Ellipsis:
if prev_value > next_value:
prev_value, next_value = next_value, prev_value
result.update(range(prev_value + 1, next_value))
else:
result.add(value)
return result | PypiClean |
/Idmeneo_cdQa-0.0.tar.gz/Idmeneo_cdQa-0.0/retriever/text_transformers.py |
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted, check_array, FLOAT_DTYPES
from sklearn.feature_extraction.text import _document_frequency
from sklearn.preprocessing import normalize
class BM25Transformer(BaseEstimator, TransformerMixin):
"""
Parameters
----------
norm : 'l1', 'l2' or None, optional (default=None)
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
* 'l1': Sum of absolute values of vector elements is 1.
use_idf : boolean, optional (default=True)
Enable inverse-document-frequency reweighting
k1 : float, optional (default=2.0)
term k1 in the BM25 formula
b : float, optional (default=0.75)
term b in the BM25 formula
floor : float or None, optional (default=None)
floor value for idf terms
References
----------
Okapi BM25: a non-binary model - Introduction to Information Retrieval
http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html
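
    Examples
    --------
    A minimal usage sketch, assuming a scikit-learn ``CountVectorizer``
    (not part of this module) supplies the document-term matrix:

    >>> from sklearn.feature_extraction.text import CountVectorizer
    >>> docs = ["the cat sat", "the dog sat on the mat"]
    >>> vectorizer = CountVectorizer()
    >>> X = vectorizer.fit_transform(docs)
    >>> bm25 = BM25Transformer().fit(X)
    >>> weighted_docs = bm25.transform()  # BM25-weighted document matrix
    >>> weighted_query = bm25.transform(vectorizer.transform(["the cat"]), is_query=True)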
"""
def __init__(self, norm=None, use_idf=True, k1=2.0, b=0.75, floor=None):
self.norm = norm
self.use_idf = use_idf
self.k1 = k1
self.b = b
self.floor = floor
def fit(self, X):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features]
document-term matrix
"""
X = check_array(X, accept_sparse=("csr", "csc"))
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
if self.floor is not None:
idf = idf * (idf > self.floor) + self.floor * (idf < self.floor)
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features)
# Create BM25 features
# Document length (number of terms) in each row
# Shape is (n_samples, 1)
dl = X.sum(axis=1)
# Number of non-zero elements in each row
# Shape is (n_samples, )
sz = X.indptr[1:] - X.indptr[0:-1]
# In each row, repeat `dl` for `sz` times
# Shape is (sum(sz), )
# Example
# -------
# dl = [4, 5, 6]
# sz = [1, 2, 3]
# rep = [4, 5, 5, 6, 6, 6]
rep = np.repeat(np.asarray(dl), sz)
# Average document length
# Scalar value
avgdl = np.average(dl)
# Compute BM25 score only for non-zero elements
data = (
X.data
* (self.k1 + 1)
/ (X.data + self.k1 * (1 - self.b + self.b * rep / avgdl))
)
X = sp.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
self._doc_matrix = X
return self
def transform(self, X=None, copy=True, is_query=False):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features]
document-term query matrix
copy : boolean, optional (default=True)
query: boolean (default=False)
whether to transform a query or the documents database
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if is_query:
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
n_samples, n_features = X.shape
expected_n_features = self._doc_matrix.shape[1]
if n_features != expected_n_features:
raise ValueError(
"Input has n_features=%d while the model"
" has been trained with n_features=%d"
% (n_features, expected_n_features)
)
if self.use_idf:
check_is_fitted(self, "_idf_diag", "idf vector is not fitted")
X = sp.csr_matrix(X.toarray() * self._idf_diag.diagonal())
return X
else:
return self._doc_matrix
@property
def idf_(self):
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
self._idf_diag = sp.spdiags(
value, diags=0, m=n_features, n=n_features, format="csr"
) | PypiClean |
/Netzob-2.0.0.tar.gz/Netzob-2.0.0/src/netzob/Model/Vocabulary/ApplicativeData.py |
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Related third party imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
@NetzobLogger
class ApplicativeData(object):
"""An applicative data represents an information used over the application
that generated the captured flows. It can be the player name or the user email address
if these informations are used somehow by the protocol.
An applicative data can be created out of any information.
>>> from netzob.all import *
>>> app = ApplicativeData("Username", String("toto"))
>>> print(app.name)
Username
>>> app1 = ApplicativeData("Email", String("[email protected]"))
>>> print(app1.value)
String('[email protected]')
"""
def __init__(self, name, value):
self.name = name
self.value = value
@property
def name(self):
"""The name of the applicative data.
:type: :mod:`str`
"""
return self.__name
@name.setter # type: ignore
@typeCheck(str)
def name(self, name):
if name is None:
raise TypeError("Name cannot be None")
self.__name = name
@property
def value(self):
"""The value of the applicative data.
:type: object
"""
return self.__value
@value.setter # type: ignore
def value(self, value):
if value is None:
raise TypeError("Value cannot be None")
self.__value = value
def __str__(self):
"""Redefine the string representation of the current
applicative Data.
:return: the string representation of the applicative data
:rtype: str
"""
return "Applicative Data: {0}={1})".format(self.name, self.value) | PypiClean |